localrepo: make requirements and openerreqs mutable by subclasses...
Bryan O'Sullivan
r17137:b090601a default
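
This changeset hoists the hard-coded requirements list and the opener requirements
into class attributes (requirements, openerreqs) and adds a _baserequirements(create)
hook, so localrepository subclasses can extend them without copying __init__ or
_applyrequirements. A minimal sketch of how an extension's subclass might use the
new attributes; the 'myrequirement' name is invented for illustration:

    # Hypothetical extension code, sketched against this changeset's API.
    from mercurial import localrepo

    class myrepository(localrepo.localrepository):
        # accept the extra entry when reading .hg/requires
        supported = localrepo.localrepository.supported | set(('myrequirement',))
        # record it in newly created repositories, via _baserequirements()
        requirements = localrepo.localrepository.requirements + ['myrequirement']
        # and expose it to the store opener through sopener.options
        openerreqs = localrepo.localrepository.openerreqs | set(('myrequirement',))
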
@@ -1,2453 +1,2457 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup, subrepo, discovery, pushkey, obsolete
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding, base85
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache
 filecache = scmutil.filecache
 
 class storecache(filecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                         'known', 'getbundle'))
     supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))
+    openerreqs = set(('revlogv1', 'generaldelta'))
+    requirements = ['revlogv1']
+
+    def _baserequirements(self, create):
+        return self.requirements[:]
 
     def __init__(self, baseui, path=None, create=False):
         repo.repository.__init__(self)
         self.root = os.path.realpath(util.expandpath(path))
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.opener = scmutil.opener(self.path)
         self.wopener = scmutil.opener(self.root)
         self.baseui = baseui
         self.ui = baseui.copy()
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
 
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     util.makedirs(path)
                 util.makedir(self.path, notindexed=True)
-                requirements = ["revlogv1"]
+                requirements = self._baserequirements(create)
                 if self.ui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                 # create an invalid changelog
                 self.opener.append(
                     "00changelog.i",
                     '\0\0\0\2' # represents revlogv2
                     ' dummy changelog to prevent using the old repo layout'
                 )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
                 requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.opener, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()
 
         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         self.store = store.store(requirements, self.sharedpath, scmutil.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()
 
 
         self._branchcache = None
         self._branchcachetip = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
     def _applyrequirements(self, requirements):
         self.requirements = requirements
-        openerreqs = set(('revlogv1', 'generaldelta'))
         self.sopener.options = dict((r, 1) for r in requirements
-                                    if r in openerreqs)
+                                    if r in self.openerreqs)
 
     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in self.requirements:
             reqfile.write("%s\n" % r)
         reqfile.close()
 
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)
 
         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False
 
     @filecache('bookmarks')
     def _bookmarks(self):
         return bookmarks.read(self)
 
     @filecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)
 
     def _writebookmarks(self, marks):
         bookmarks.write(self)
 
     def bookmarkheads(self, bookmark):
         name = bookmark.split('@', 1)[0]
         heads = []
         for mark, n in self._bookmarks.iteritems():
             if mark.split('@', 1)[0] == name:
                 heads.append(n)
         return heads
 
     @storecache('phaseroots')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)
 
     @storecache('obsstore')
     def obsstore(self):
         store = obsolete.obsstore()
         data = self.sopener.tryread('obsstore')
         if data:
             store.loadmarkers(data)
         return store
 
     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         return c
 
     @storecache('00manifest.i')
     def manifest(self):
         return manifest.manifest(self.sopener)
 
     @filecache('dirstate')
     def dirstate(self):
         warned = [0]
         def validate(node):
             try:
                 self.changelog.rev(node)
                 return node
             except error.LookupError:
                 if not warned[0]:
                     warned[0] = True
                     self.ui.warn(_("warning: ignoring unknown"
                                    " working parent %s!\n") % short(node))
                 return nullid
 
         return dirstate.dirstate(self.opener, self.ui, self.root, validate)
 
     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)
 
     def __contains__(self, changeid):
         try:
             return bool(self.lookup(changeid))
         except error.RepoLookupError:
             return False
 
     def __nonzero__(self):
         return True
 
     def __len__(self):
         return len(self.changelog)
 
     def __iter__(self):
         for i in xrange(len(self)):
             yield i
 
     def revs(self, expr, *args):
         '''Return a list of revisions matching the given revset'''
         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
         return [r for r in m(self, range(len(self)))]
 
     def set(self, expr, *args):
         '''
         Yield a context for each matching revision, after doing arg
         replacement via revset.formatspec
         '''
         for r in self.revs(expr, *args):
             yield self[r]
 
     def url(self):
         return 'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)
 
     tag_disallowed = ':\r\n'
 
     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             allchars = names
             names = (names,)
         else:
             allchars = ''.join(names)
         for c in self.tag_disallowed:
             if c in allchars:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)
 
         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)
 
         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if (self._tagscache.tagtypes and
                     name in self._tagscache.tagtypes):
                     old = self.tags().get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()
 
         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()
 
             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return
 
         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError, e:
             if e.errno != errno.ENOENT:
                 raise
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()
 
         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)
 
         fp.close()
 
         self.invalidatecaches()
 
         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])
 
         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m)
 
         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)
 
         return tagnode
 
     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.
 
         names is a list of strings or, when adding a single tag, names may be a
         string.
 
         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.
 
         keyword arguments:
 
         local: whether to store tags in non-version-controlled file
         (default False)
 
         message: commit message to use if committing
 
         user: name of user to use if committing
 
         date: date tuple to use if committing'''
 
         if not local:
             for x in self.status()[:5]:
                 if '.hgtags' in x:
                     raise util.Abort(_('working copy of .hgtags is changed '
                                        '(please commit .hgtags manually)'))
 
         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)
 
     @propertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''
 
         # This simplifies its cache management by having one decorated
         # function (this one) and the rest simply fetch things from it.
         class tagscache(object):
             def __init__(self):
                 # These two define the set of tags for this repository. tags
                 # maps tag name to node; tagtypes maps tag name to 'global' or
                 # 'local'. (Global tags are defined by .hgtags across all
                 # heads, and local tags are defined in .hg/localtags.)
                 # They constitute the in-memory cache of tags.
                 self.tags = self.tagtypes = None
 
                 self.nodetagscache = self.tagslist = None
 
         cache = tagscache()
         cache.tags, cache.tagtypes = self._findtags()
 
         return cache
 
     def tags(self):
         '''return a mapping of tag to node'''
         t = {}
         for k, v in self._tagscache.tags.iteritems():
             try:
                 # ignore tags to unknown nodes
                 self.changelog.rev(v)
                 t[k] = v
             except (error.LookupError, ValueError):
                 pass
         return t
 
     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''
 
         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?
 
         alltags = {} # map tag name to (node, hist)
         tagtypes = {}
 
         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
 
         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)
 
     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:
 
         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''
 
         return self._tagscache.tagtypes.get(tagname)
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
             for t, n in self.tags().iteritems():
                 r = self.changelog.rev(n)
                 l.append((r, t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
 
         return self._tagscache.tagslist
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
             for t, n in self._tagscache.tags.iteritems():
                 nodetagscache.setdefault(n, []).append(t)
             for tags in nodetagscache.itervalues():
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])
 
     def nodebookmarks(self, node):
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)
 
     def _branchtags(self, partial, lrev):
         # TODO: rename this function?
         tiprev = len(self) - 1
         if lrev != tiprev:
             ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
             self._updatebranchcache(partial, ctxgen)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)
 
         return partial
 
     def updatebranchcache(self):
         tip = self.changelog.tip()
         if self._branchcache is not None and self._branchcachetip == tip:
             return
 
         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._branchcache
 
         self._branchtags(partial, lrev)
         # this private cache holds all heads (not just the branch tips)
         self._branchcache = partial
 
     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
         self.updatebranchcache()
         return self._branchcache
 
     def _branchtip(self, heads):
         '''return the tipmost branch head in heads'''
         tip = heads[-1]
         for h in reversed(heads):
             if not self[h].closesbranch():
                 tip = h
                 break
         return tip
 
     def branchtip(self, branch):
         '''return the tip node for a given branch'''
         if branch not in self.branchmap():
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
         return self._branchtip(self.branchmap()[branch])
 
     def branchtags(self):
         '''return a dict where branch names map to the tipmost head of
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             bt[bn] = self._branchtip(heads)
         return bt
 
     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("cache/branchheads")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev
 
         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if lrev >= len(self) or self[lrev].node() != last:
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l:
                     continue
                 node, label = l.split(" ", 1)
                 label = encoding.tolocal(label.strip())
                 if not node in self:
                     raise ValueError('invalidating branch cache because node '+
                                      '%s does not exist' % node)
                 partial.setdefault(label, []).append(bin(node))
         except KeyboardInterrupt:
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev
 
     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("cache/branchheads", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, nodes in branches.iteritems():
                 for node in nodes:
                     f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
             f.close()
         except (IOError, OSError):
             pass
 
     def _updatebranchcache(self, partial, ctxgen):
         """Given a branchhead cache, partial, that may have extra nodes or be
         missing heads, and a generator of nodes that are at least a superset of
         heads missing, this function updates partial to be correct.
         """
         # collect new branch entries
         newbranches = {}
         for c in ctxgen:
             newbranches.setdefault(c.branch(), []).append(c.node())
         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newnodes in newbranches.iteritems():
             bheads = partial.setdefault(branch, [])
             # Remove candidate heads that no longer are in the repo (e.g., as
             # the result of a strip that just happened). Avoid using 'node in
             # self' here because that dives down into branchcache code somewhat
             # recrusively.
             bheadrevs = [self.changelog.rev(node) for node in bheads
                          if self.changelog.hasnode(node)]
             newheadrevs = [self.changelog.rev(node) for node in newnodes
                            if self.changelog.hasnode(node)]
             ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
             # Remove duplicates - nodes that are in newheadrevs and are already
             # in bheadrevs. This can happen if you strip a node whose parent
             # was already a head (because they're on different branches).
             bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
 
             # Starting from tip means fewer passes over reachable. If we know
             # the new candidates are not ancestors of existing heads, we don't
             # have to examine ancestors of existing heads
             if ctxisnew:
                 iterrevs = sorted(newheadrevs)
             else:
                 iterrevs = list(bheadrevs)
 
             # This loop prunes out two kinds of heads - heads that are
             # superceded by a head in newheadrevs, and newheadrevs that are not
             # heads because an existing head is their descendant.
             while iterrevs:
                 latest = iterrevs.pop()
                 if latest not in bheadrevs:
                     continue
                 ancestors = set(self.changelog.ancestors([latest],
                                                          bheadrevs[0]))
                 if ancestors:
                     bheadrevs = [b for b in bheadrevs if b not in ancestors]
             partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
 
         # There may be branches that cease to exist when the last commit in the
         # branch was stripped. This code filters them out. Note that the
         # branch that ceased to exist may not be in newbranches because
         # newbranches is the set of candidate heads, which when you strip the
         # last commit in a branch will be the parent branch.
         for branch in partial:
             nodes = [head for head in partial[branch]
                      if self.changelog.hasnode(head)]
             if not nodes:
                 del partial[branch]
 
     def lookup(self, key):
         return self[key].node()
 
     def lookupbranch(self, key, remote=None):
         repo = remote or self
         if key in repo.branchmap():
             return key
 
         repo = (remote and remote.local()) and remote or self
         return repo[key].branch()
 
     def known(self, nodes):
         nm = self.changelog.nodemap
         pc = self._phasecache
         result = []
         for n in nodes:
             r = nm.get(n)
             resp = not (r is None or pc.phase(self, r) >= phases.secret)
             result.append(resp)
         return result
 
     def local(self):
         return self
 
     def join(self, f):
         return os.path.join(self.path, f)
 
     def wjoin(self, f):
         return os.path.join(self.root, f)
 
     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)
 
     def changectx(self, changeid):
         return self[changeid]
 
     def parents(self, changeid=None):
         '''get list of changectxs for parents of changeid'''
         return self[changeid].parents()
 
     def setparents(self, p1, p2=nullid):
         copies = self.dirstate.setparents(p1, p2)
         if copies:
             # Adjust copy records, the dirstate cannot do it, it
             # requires access to parents manifests. Preserve them
             # only for entries added to first parent.
             pctx = self[p1]
             for f in copies:
                 if f not in pctx and copies[f] in pctx:
                     self.dirstate.copy(copies[f], f)
 
     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
         fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)
 
     def getcwd(self):
         return self.dirstate.getcwd()
 
     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)
 
     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)
 
     def _link(self, f):
         return os.path.islink(self.wjoin(f))
 
     def _loadfilter(self, filter):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 if cmd == '!':
                     continue
                 mf = matchmod.match(self.root, '', [pat])
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l
         return self.filterpats[filter]
 
     def _filter(self, filterpats, filename, data):
         for mf, fn, cmd in filterpats:
             if mf(filename):
                 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break
 
         return data
 
     @propertycache
     def _encodefilterpats(self):
         return self._loadfilter('encode')
 
     @propertycache
     def _decodefilterpats(self):
         return self._loadfilter('decode')
 
     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter
 
     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener.read(filename)
         return self._filter(self._encodefilterpats, filename, data)
 
     def wwrite(self, filename, data, flags):
         data = self._filter(self._decodefilterpats, filename, data)
         if 'l' in flags:
             self.wopener.symlink(data, filename)
         else:
             self.wopener.write(filename, data)
             if 'x' in flags:
                 util.setflags(self.wjoin(filename), False, True)
 
     def wwritedata(self, filename, data):
         return self._filter(self._decodefilterpats, filename, data)
 
     def transaction(self, desc):
         tr = self._transref and self._transref() or None
         if tr and tr.running():
             return tr.nest()
 
         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise error.RepoError(
                 _("abandoned transaction found - run hg recover"))
 
         self._writejournal(desc)
         renames = [(x, undoname(x)) for x in self._journalfiles()]
 
         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
                                      aftertrans(renames),
                                      self.store.createmode)
         self._transref = weakref.ref(tr)
         return tr
 
     def _journalfiles(self):
         return (self.sjoin('journal'), self.join('journal.dirstate'),
                 self.join('journal.branch'), self.join('journal.desc'),
                 self.join('journal.bookmarks'),
                 self.sjoin('journal.phaseroots'))
 
     def undofiles(self):
         return [undoname(x) for x in self._journalfiles()]
 
     def _writejournal(self, desc):
         self.opener.write("journal.dirstate",
                           self.opener.tryread("dirstate"))
         self.opener.write("journal.branch",
                           encoding.fromlocal(self.dirstate.branch()))
         self.opener.write("journal.desc",
                           "%d\n%s\n" % (len(self), desc))
         self.opener.write("journal.bookmarks",
                           self.opener.tryread("bookmarks"))
         self.sopener.write("journal.phaseroots",
                            self.sopener.tryread("phaseroots"))
 
     def recover(self):
         lock = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"),
                                      self.ui.warn)
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             lock.release()
 
832 def rollback(self, dryrun=False, force=False):
836 def rollback(self, dryrun=False, force=False):
833 wlock = lock = None
837 wlock = lock = None
834 try:
838 try:
835 wlock = self.wlock()
839 wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

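    # Illustrative sketch (not part of the original file): _rollback() above
    # parses .hg/undo.desc, which holds the changelog length from before the
    # transaction, the transaction description, and an optional detail line.
    # Under that assumption, rolling back an "hg commit" would read roughly:
    #
    #     3120
    #     commit
    #
    # so oldlen is 3120 and the tip is rolled back to revision 3119.
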
    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            if 'obsstore' in vars(self) and self.obsstore._new:
                # XXX: transaction logic should be used here. But for
                # now rewriting the whole file is good enough.
                f = self.sopener('obsstore', 'wb', atomictemp=True)
                try:
                    self.obsstore.flushmarkers(f)
                    f.close()
                except: # re-raises
                    f.discard()
                    raise
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

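    # Lock-ordering sketch (illustrative, not part of the original file):
    # code that touches both the working copy and the store takes wlock()
    # before lock(), mirroring the rollback() pattern above:
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         ...  # modify dirstate and store
    #     finally:
    #         release(lock, wlock)
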
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

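    # Illustrative note (not part of the original file): for a rename
    # committed through _filecommit() above, the new filelog entry for
    # 'bar' carries copy metadata of roughly this shape:
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}  # 40 hex digits
    #
    # with fparent1 set to nullid, which tells readers to look up the copy
    # data instead of following a direct parent.
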
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

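    # Usage sketch (illustrative, not part of the original file): committing
    # a subset of files through the matcher argument of commit() above:
    #
    #     from mercurial import match as matchmod
    #     m = matchmod.match(repo.root, '', ['path:src/foo.py'])
    #     node = repo.commit(text='fix foo', user='alice', match=m)
    #     # commit() returns None when there is nothing to commit
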
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

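    # Usage sketch (illustrative, not part of the original file; the memctx
    # and memfilectx signatures below are assumptions about the contemporary
    # context API): commitctx() also accepts in-memory contexts, which is
    # how extensions such as convert create revisions without touching the
    # working copy:
    #
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(path, 'file content\n')
    #     mctx = context.memctx(repo, (p1node, p2node), 'message',
    #                           ['a.txt'], getfilectx, user='alice')
    #     node = repo.commitctx(mctx)
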
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheads cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

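    # Illustrative note (not part of the original file): strip-style callers
    # that know which changesets they removed can pass the parents of the
    # stripped revisions as candidate new heads, e.g.
    #
    #     repo.destroyed(newheadnodes=candidate_parent_nodes)
    #
    # which lets the branchheads cache be updated in place instead of being
    # rebuilt from scratch on the next read.
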
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

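    # Usage sketch (illustrative, not part of the original file): status()
    # returns seven sorted lists, conventionally unpacked like this:
    #
    #     st = repo.status(node1='.', node2=None, clean=True)
    #     modified, added, removed, deleted, unknown, ignored, clean = st
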
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

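    # Illustrative note (not part of the original file): between() walks
    # first parents from top towards bottom and records the nodes at
    # exponentially growing distances, i.e. 1, 2, 4, 8, ... steps from top.
    # For a 10-changeset linear history with top=rev 9 and bottom=rev 0,
    # the returned list holds the nodes of revs 8, 7, 5 and 1.
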
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(data)
        finally:
            lock.release()

        return result

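    # Usage sketch (illustrative, not part of the original file): pulling
    # from another repository object; hg.repository stands in for however
    # the caller obtained the remote:
    #
    #     from mercurial import ui as uimod, hg
    #     other = hg.repository(uimod.ui(), '/path/to/other')
    #     result = repo.pull(other)  # result is 0 when no changes were found
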
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

1728 if not outgoing.missing:
1732 if not outgoing.missing:
1729 # nothing to push
1733 # nothing to push
1730 scmutil.nochangesfound(self.ui, outgoing.excluded)
1734 scmutil.nochangesfound(self.ui, outgoing.excluded)
1731 ret = None
1735 ret = None
1732 else:
1736 else:
1733 # something to push
1737 # something to push
1734 if not force:
1738 if not force:
1735 discovery.checkheads(self, remote, outgoing,
1739 discovery.checkheads(self, remote, outgoing,
1736 remoteheads, newbranch,
1740 remoteheads, newbranch,
1737 bool(inc))
1741 bool(inc))
1738
1742
1739 # create a changegroup from local
1743 # create a changegroup from local
1740 if revs is None and not outgoing.excluded:
1744 if revs is None and not outgoing.excluded:
1741 # push everything,
1745 # push everything,
1742 # use the fast path, no race possible on push
1746 # use the fast path, no race possible on push
1743 cg = self._changegroup(outgoing.missing, 'push')
1747 cg = self._changegroup(outgoing.missing, 'push')
1744 else:
1748 else:
1745 cg = self.getlocalbundle('push', outgoing)
1749 cg = self.getlocalbundle('push', outgoing)
1746
1750
1747 # apply changegroup to remote
1751 # apply changegroup to remote
1748 if unbundle:
1752 if unbundle:
1749 # local repo finds heads on server, finds out what
1753 # local repo finds heads on server, finds out what
1750 # revs it must push. once revs transferred, if server
1754 # revs it must push. once revs transferred, if server
1751 # finds it has different heads (someone else won
1755 # finds it has different heads (someone else won
1752 # commit/push race), server aborts.
1756 # commit/push race), server aborts.
1753 if force:
1757 if force:
1754 remoteheads = ['force']
1758 remoteheads = ['force']
1755 # ssh: return remote's addchangegroup()
1759 # ssh: return remote's addchangegroup()
1756 # http: return remote's addchangegroup() or 0 for error
1760 # http: return remote's addchangegroup() or 0 for error
1757 ret = remote.unbundle(cg, remoteheads, 'push')
1761 ret = remote.unbundle(cg, remoteheads, 'push')
1758 else:
1762 else:
1759 # we return an integer indicating remote head count
1763 # we return an integer indicating remote head count
1760 # change
1764 # change
1761 ret = remote.addchangegroup(cg, 'push', self.url())
1765 ret = remote.addchangegroup(cg, 'push', self.url())
1762
1766
1763 if ret:
1767 if ret:
1764 # push succeed, synchonize target of the push
1768 # push succeed, synchonize target of the push
1765 cheads = outgoing.missingheads
1769 cheads = outgoing.missingheads
1766 elif revs is None:
1770 elif revs is None:
1767 # All out push fails. synchronize all common
1771 # All out push fails. synchronize all common
1768 cheads = outgoing.commonheads
1772 cheads = outgoing.commonheads
1769 else:
1773 else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
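                    # Editorial sketch (not in the original source): on a
                    # linear history A-B-C-D where the remote already has
                    # A-B (commonheads = [B]) and we tried to push
                    # revs = [D], missing is {C, D}, roots(missing) = {C}
                    # and parents(C) = {B}, so the two picks above yield
                    # cheads = [B]; the phase exchange below then only
                    # touches history the remote actually shares.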
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX beware that the revset breaks if droots is not
                    # XXX strictly made of roots; we may want to ensure it is,
                    # XXX but that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
                    data = self.obsstore._writemarkers()
                    r = remote.pushkey('obsolete', 'dump', '',
                                       base85.b85encode(data))
                    if not r:
                        self.ui.warn(_('failed to push obsolete markers!\n'))
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

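    # Editorial sketch (assumption, not part of the original module): a
    # caller distinguishing push outcomes might do
    #
    #   ret = repo.push(remote, revs=heads)
    #   if ret is None:
    #       ui.status('nothing to push\n')
    #   elif ret == 0:
    #       ui.warn('push failed (http error)\n')
    #   # any other value encodes the remote head-count change, as
    #   # described by addchangegroup() below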
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

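    # Editorial sketch (assumption, not from the original source): on a
    # linear history A-B-C-D, changegroupsubset([B], [D], 'pull') bundles
    # B, C and D, since nodesbetween() selects the descendants of a base
    # that are also ancestors of a head, endpoints included.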
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

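    # Editorial sketch (assumption, not part of the original module): a
    # pull between two local repositories could be phrased as
    #
    #   cg = src.getbundle('pull', heads=src.heads(), common=dst.heads())
    #   if cg is not None:
    #       dst.addchangegroup(cg, 'pull', src.url())
    #
    # Nodes in 'common' that src does not know are silently dropped by
    # the nodemap filter above.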
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

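        # Editorial note (not in the original source): prune() keeps only
        # the nodes whose introducing changeset (their linkrev) lies
        # outside commonrevs; e.g. a manifest node first introduced by an
        # already-common changeset is dropped from the group.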
        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

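    # Editorial note (layout inferred from gengroup() above, not from a
    # separate spec): a bundle10 stream is one delta group for the
    # changelog, one for the manifest, then, for each changed file, a
    # file header followed by that file's delta group, terminated by
    # bundler.close().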
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset already
                # existed locally as secret.
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

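    # Editorial sketch of the stream format, as parsed above (assumption,
    # no separate spec is quoted here): a status line ('0' success, '1'
    # forbidden, '2' remote lock failure), then '<filecount> <bytecount>\n',
    # then per file a '<storepath>\0<size>\n' header followed by exactly
    # <size> bytes of raw store data.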
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

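    # Editorial note (inferred from the branches above): repo.clone(remote)
    # may stream when the server allows or prefers it, while
    # repo.clone(remote, heads=[h1, h2]) always falls back to pull(),
    # because a partial clone cannot use the all-or-nothing stream protocol.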
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

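    # Editorial sketch (assumption, mirroring the pushkey calls made in
    # push() above): moving a bookmark over the wire could look like
    #
    #   marks = peer.listkeys('bookmarks')
    #   ok = peer.pushkey('bookmarks', 'mybook', marks['mybook'], newhex)
    #
    # where a false return means the 'old' value no longer matched.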
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

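# Editorial sketch (assumption, not part of the original module): the
# closure returned by aftertrans() is meant to be used as a transaction's
# 'after' callback, roughly
#
#   tr = transaction.transaction(ui.warn, opener, "journal",
#                                after=aftertrans([("journal", "undo")]))
#
# so the journal is renamed to undo once the transaction closes, without
# the transaction object keeping a reference back to the repository.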
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

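# For example, undoname('.hg/store/journal.phaseroots') returns
# '.hg/store/undo.phaseroots' (illustrative path, not quoted from the
# original source).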
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True