##// END OF EJS Templates
localrepo: don't add deleted files to list of modified/added files (issue2761)...
Idan Kamara -
r13929:cff56a0e default
parent child Browse files
Show More
@@ -0,0 +1,23 b''
1 Test issue2761
2
3 $ hg init
4
5 $ touch to-be-deleted
6 $ hg add
7 adding to-be-deleted
8 $ hg ci -m first
9 $ echo a > to-be-deleted
10 $ hg ci -m second
11 $ rm to-be-deleted
12 $ hg diff -r 0
13
14 Same issue, different code path
15
16 $ hg up -C
17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 $ touch doesnt-exist-in-1
19 $ hg add
20 adding doesnt-exist-in-1
21 $ hg ci -m third
22 $ rm doesnt-exist-in-1
23 $ hg diff -r 1
@@ -1,1934 +1,1935 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 'known', 'getbundle'))
24 'known', 'getbundle'))
25 supportedformats = set(('revlogv1', 'parentdelta'))
25 supportedformats = set(('revlogv1', 'parentdelta'))
26 supported = supportedformats | set(('store', 'fncache', 'shared',
26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 'dotencode'))
27 'dotencode'))
28
28
29 def __init__(self, baseui, path=None, create=0):
29 def __init__(self, baseui, path=None, create=0):
30 repo.repository.__init__(self)
30 repo.repository.__init__(self)
31 self.root = os.path.realpath(util.expandpath(path))
31 self.root = os.path.realpath(util.expandpath(path))
32 self.path = os.path.join(self.root, ".hg")
32 self.path = os.path.join(self.root, ".hg")
33 self.origroot = path
33 self.origroot = path
34 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.auditor = util.path_auditor(self.root, self._checknested)
35 self.opener = util.opener(self.path)
35 self.opener = util.opener(self.path)
36 self.wopener = util.opener(self.root)
36 self.wopener = util.opener(self.root)
37 self.baseui = baseui
37 self.baseui = baseui
38 self.ui = baseui.copy()
38 self.ui = baseui.copy()
39
39
40 try:
40 try:
41 self.ui.readconfig(self.join("hgrc"), self.root)
41 self.ui.readconfig(self.join("hgrc"), self.root)
42 extensions.loadall(self.ui)
42 extensions.loadall(self.ui)
43 except IOError:
43 except IOError:
44 pass
44 pass
45
45
46 if not os.path.isdir(self.path):
46 if not os.path.isdir(self.path):
47 if create:
47 if create:
48 if not os.path.exists(path):
48 if not os.path.exists(path):
49 util.makedirs(path)
49 util.makedirs(path)
50 util.makedir(self.path, notindexed=True)
50 util.makedir(self.path, notindexed=True)
51 requirements = ["revlogv1"]
51 requirements = ["revlogv1"]
52 if self.ui.configbool('format', 'usestore', True):
52 if self.ui.configbool('format', 'usestore', True):
53 os.mkdir(os.path.join(self.path, "store"))
53 os.mkdir(os.path.join(self.path, "store"))
54 requirements.append("store")
54 requirements.append("store")
55 if self.ui.configbool('format', 'usefncache', True):
55 if self.ui.configbool('format', 'usefncache', True):
56 requirements.append("fncache")
56 requirements.append("fncache")
57 if self.ui.configbool('format', 'dotencode', True):
57 if self.ui.configbool('format', 'dotencode', True):
58 requirements.append('dotencode')
58 requirements.append('dotencode')
59 # create an invalid changelog
59 # create an invalid changelog
60 self.opener("00changelog.i", "a").write(
60 self.opener("00changelog.i", "a").write(
61 '\0\0\0\2' # represents revlogv2
61 '\0\0\0\2' # represents revlogv2
62 ' dummy changelog to prevent using the old repo layout'
62 ' dummy changelog to prevent using the old repo layout'
63 )
63 )
64 if self.ui.configbool('format', 'parentdelta', False):
64 if self.ui.configbool('format', 'parentdelta', False):
65 requirements.append("parentdelta")
65 requirements.append("parentdelta")
66 else:
66 else:
67 raise error.RepoError(_("repository %s not found") % path)
67 raise error.RepoError(_("repository %s not found") % path)
68 elif create:
68 elif create:
69 raise error.RepoError(_("repository %s already exists") % path)
69 raise error.RepoError(_("repository %s already exists") % path)
70 else:
70 else:
71 # find requirements
71 # find requirements
72 requirements = set()
72 requirements = set()
73 try:
73 try:
74 requirements = set(self.opener("requires").read().splitlines())
74 requirements = set(self.opener("requires").read().splitlines())
75 except IOError, inst:
75 except IOError, inst:
76 if inst.errno != errno.ENOENT:
76 if inst.errno != errno.ENOENT:
77 raise
77 raise
78 for r in requirements - self.supported:
78 for r in requirements - self.supported:
79 raise error.RequirementError(
79 raise error.RequirementError(
80 _("requirement '%s' not supported") % r)
80 _("requirement '%s' not supported") % r)
81
81
82 self.sharedpath = self.path
82 self.sharedpath = self.path
83 try:
83 try:
84 s = os.path.realpath(self.opener("sharedpath").read())
84 s = os.path.realpath(self.opener("sharedpath").read())
85 if not os.path.exists(s):
85 if not os.path.exists(s):
86 raise error.RepoError(
86 raise error.RepoError(
87 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 _('.hg/sharedpath points to nonexistent directory %s') % s)
88 self.sharedpath = s
88 self.sharedpath = s
89 except IOError, inst:
89 except IOError, inst:
90 if inst.errno != errno.ENOENT:
90 if inst.errno != errno.ENOENT:
91 raise
91 raise
92
92
93 self.store = store.store(requirements, self.sharedpath, util.opener)
93 self.store = store.store(requirements, self.sharedpath, util.opener)
94 self.spath = self.store.path
94 self.spath = self.store.path
95 self.sopener = self.store.opener
95 self.sopener = self.store.opener
96 self.sjoin = self.store.join
96 self.sjoin = self.store.join
97 self.opener.createmode = self.store.createmode
97 self.opener.createmode = self.store.createmode
98 self._applyrequirements(requirements)
98 self._applyrequirements(requirements)
99 if create:
99 if create:
100 self._writerequirements()
100 self._writerequirements()
101
101
102 # These two define the set of tags for this repository. _tags
102 # These two define the set of tags for this repository. _tags
103 # maps tag name to node; _tagtypes maps tag name to 'global' or
103 # maps tag name to node; _tagtypes maps tag name to 'global' or
104 # 'local'. (Global tags are defined by .hgtags across all
104 # 'local'. (Global tags are defined by .hgtags across all
105 # heads, and local tags are defined in .hg/localtags.) They
105 # heads, and local tags are defined in .hg/localtags.) They
106 # constitute the in-memory cache of tags.
106 # constitute the in-memory cache of tags.
107 self._tags = None
107 self._tags = None
108 self._tagtypes = None
108 self._tagtypes = None
109
109
110 self._branchcache = None
110 self._branchcache = None
111 self._branchcachetip = None
111 self._branchcachetip = None
112 self.nodetagscache = None
112 self.nodetagscache = None
113 self.filterpats = {}
113 self.filterpats = {}
114 self._datafilters = {}
114 self._datafilters = {}
115 self._transref = self._lockref = self._wlockref = None
115 self._transref = self._lockref = self._wlockref = None
116
116
117 def _applyrequirements(self, requirements):
117 def _applyrequirements(self, requirements):
118 self.requirements = requirements
118 self.requirements = requirements
119 self.sopener.options = {}
119 self.sopener.options = {}
120 if 'parentdelta' in requirements:
120 if 'parentdelta' in requirements:
121 self.sopener.options['parentdelta'] = 1
121 self.sopener.options['parentdelta'] = 1
122
122
123 def _writerequirements(self):
123 def _writerequirements(self):
124 reqfile = self.opener("requires", "w")
124 reqfile = self.opener("requires", "w")
125 for r in self.requirements:
125 for r in self.requirements:
126 reqfile.write("%s\n" % r)
126 reqfile.write("%s\n" % r)
127 reqfile.close()
127 reqfile.close()
128
128
129 def _checknested(self, path):
129 def _checknested(self, path):
130 """Determine if path is a legal nested repository."""
130 """Determine if path is a legal nested repository."""
131 if not path.startswith(self.root):
131 if not path.startswith(self.root):
132 return False
132 return False
133 subpath = path[len(self.root) + 1:]
133 subpath = path[len(self.root) + 1:]
134
134
135 # XXX: Checking against the current working copy is wrong in
135 # XXX: Checking against the current working copy is wrong in
136 # the sense that it can reject things like
136 # the sense that it can reject things like
137 #
137 #
138 # $ hg cat -r 10 sub/x.txt
138 # $ hg cat -r 10 sub/x.txt
139 #
139 #
140 # if sub/ is no longer a subrepository in the working copy
140 # if sub/ is no longer a subrepository in the working copy
141 # parent revision.
141 # parent revision.
142 #
142 #
143 # However, it can of course also allow things that would have
143 # However, it can of course also allow things that would have
144 # been rejected before, such as the above cat command if sub/
144 # been rejected before, such as the above cat command if sub/
145 # is a subrepository now, but was a normal directory before.
145 # is a subrepository now, but was a normal directory before.
146 # The old path auditor would have rejected by mistake since it
146 # The old path auditor would have rejected by mistake since it
147 # panics when it sees sub/.hg/.
147 # panics when it sees sub/.hg/.
148 #
148 #
149 # All in all, checking against the working copy seems sensible
149 # All in all, checking against the working copy seems sensible
150 # since we want to prevent access to nested repositories on
150 # since we want to prevent access to nested repositories on
151 # the filesystem *now*.
151 # the filesystem *now*.
152 ctx = self[None]
152 ctx = self[None]
153 parts = util.splitpath(subpath)
153 parts = util.splitpath(subpath)
154 while parts:
154 while parts:
155 prefix = os.sep.join(parts)
155 prefix = os.sep.join(parts)
156 if prefix in ctx.substate:
156 if prefix in ctx.substate:
157 if prefix == subpath:
157 if prefix == subpath:
158 return True
158 return True
159 else:
159 else:
160 sub = ctx.sub(prefix)
160 sub = ctx.sub(prefix)
161 return sub.checknested(subpath[len(prefix) + 1:])
161 return sub.checknested(subpath[len(prefix) + 1:])
162 else:
162 else:
163 parts.pop()
163 parts.pop()
164 return False
164 return False
165
165
166 @util.propertycache
166 @util.propertycache
167 def _bookmarks(self):
167 def _bookmarks(self):
168 return bookmarks.read(self)
168 return bookmarks.read(self)
169
169
170 @util.propertycache
170 @util.propertycache
171 def _bookmarkcurrent(self):
171 def _bookmarkcurrent(self):
172 return bookmarks.readcurrent(self)
172 return bookmarks.readcurrent(self)
173
173
174 @propertycache
174 @propertycache
175 def changelog(self):
175 def changelog(self):
176 c = changelog.changelog(self.sopener)
176 c = changelog.changelog(self.sopener)
177 if 'HG_PENDING' in os.environ:
177 if 'HG_PENDING' in os.environ:
178 p = os.environ['HG_PENDING']
178 p = os.environ['HG_PENDING']
179 if p.startswith(self.root):
179 if p.startswith(self.root):
180 c.readpending('00changelog.i.a')
180 c.readpending('00changelog.i.a')
181 self.sopener.options['defversion'] = c.version
181 self.sopener.options['defversion'] = c.version
182 return c
182 return c
183
183
184 @propertycache
184 @propertycache
185 def manifest(self):
185 def manifest(self):
186 return manifest.manifest(self.sopener)
186 return manifest.manifest(self.sopener)
187
187
188 @propertycache
188 @propertycache
189 def dirstate(self):
189 def dirstate(self):
190 warned = [0]
190 warned = [0]
191 def validate(node):
191 def validate(node):
192 try:
192 try:
193 r = self.changelog.rev(node)
193 r = self.changelog.rev(node)
194 return node
194 return node
195 except error.LookupError:
195 except error.LookupError:
196 if not warned[0]:
196 if not warned[0]:
197 warned[0] = True
197 warned[0] = True
198 self.ui.warn(_("warning: ignoring unknown"
198 self.ui.warn(_("warning: ignoring unknown"
199 " working parent %s!\n") % short(node))
199 " working parent %s!\n") % short(node))
200 return nullid
200 return nullid
201
201
202 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
202 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
203
203
204 def __getitem__(self, changeid):
204 def __getitem__(self, changeid):
205 if changeid is None:
205 if changeid is None:
206 return context.workingctx(self)
206 return context.workingctx(self)
207 return context.changectx(self, changeid)
207 return context.changectx(self, changeid)
208
208
209 def __contains__(self, changeid):
209 def __contains__(self, changeid):
210 try:
210 try:
211 return bool(self.lookup(changeid))
211 return bool(self.lookup(changeid))
212 except error.RepoLookupError:
212 except error.RepoLookupError:
213 return False
213 return False
214
214
215 def __nonzero__(self):
215 def __nonzero__(self):
216 return True
216 return True
217
217
218 def __len__(self):
218 def __len__(self):
219 return len(self.changelog)
219 return len(self.changelog)
220
220
221 def __iter__(self):
221 def __iter__(self):
222 for i in xrange(len(self)):
222 for i in xrange(len(self)):
223 yield i
223 yield i
224
224
225 def url(self):
225 def url(self):
226 return 'file:' + self.root
226 return 'file:' + self.root
227
227
228 def hook(self, name, throw=False, **args):
228 def hook(self, name, throw=False, **args):
229 return hook.hook(self.ui, self, name, throw, **args)
229 return hook.hook(self.ui, self, name, throw, **args)
230
230
231 tag_disallowed = ':\r\n'
231 tag_disallowed = ':\r\n'
232
232
233 def _tag(self, names, node, message, local, user, date, extra={}):
233 def _tag(self, names, node, message, local, user, date, extra={}):
234 if isinstance(names, str):
234 if isinstance(names, str):
235 allchars = names
235 allchars = names
236 names = (names,)
236 names = (names,)
237 else:
237 else:
238 allchars = ''.join(names)
238 allchars = ''.join(names)
239 for c in self.tag_disallowed:
239 for c in self.tag_disallowed:
240 if c in allchars:
240 if c in allchars:
241 raise util.Abort(_('%r cannot be used in a tag name') % c)
241 raise util.Abort(_('%r cannot be used in a tag name') % c)
242
242
243 branches = self.branchmap()
243 branches = self.branchmap()
244 for name in names:
244 for name in names:
245 self.hook('pretag', throw=True, node=hex(node), tag=name,
245 self.hook('pretag', throw=True, node=hex(node), tag=name,
246 local=local)
246 local=local)
247 if name in branches:
247 if name in branches:
248 self.ui.warn(_("warning: tag %s conflicts with existing"
248 self.ui.warn(_("warning: tag %s conflicts with existing"
249 " branch name\n") % name)
249 " branch name\n") % name)
250
250
251 def writetags(fp, names, munge, prevtags):
251 def writetags(fp, names, munge, prevtags):
252 fp.seek(0, 2)
252 fp.seek(0, 2)
253 if prevtags and prevtags[-1] != '\n':
253 if prevtags and prevtags[-1] != '\n':
254 fp.write('\n')
254 fp.write('\n')
255 for name in names:
255 for name in names:
256 m = munge and munge(name) or name
256 m = munge and munge(name) or name
257 if self._tagtypes and name in self._tagtypes:
257 if self._tagtypes and name in self._tagtypes:
258 old = self._tags.get(name, nullid)
258 old = self._tags.get(name, nullid)
259 fp.write('%s %s\n' % (hex(old), m))
259 fp.write('%s %s\n' % (hex(old), m))
260 fp.write('%s %s\n' % (hex(node), m))
260 fp.write('%s %s\n' % (hex(node), m))
261 fp.close()
261 fp.close()
262
262
263 prevtags = ''
263 prevtags = ''
264 if local:
264 if local:
265 try:
265 try:
266 fp = self.opener('localtags', 'r+')
266 fp = self.opener('localtags', 'r+')
267 except IOError:
267 except IOError:
268 fp = self.opener('localtags', 'a')
268 fp = self.opener('localtags', 'a')
269 else:
269 else:
270 prevtags = fp.read()
270 prevtags = fp.read()
271
271
272 # local tags are stored in the current charset
272 # local tags are stored in the current charset
273 writetags(fp, names, None, prevtags)
273 writetags(fp, names, None, prevtags)
274 for name in names:
274 for name in names:
275 self.hook('tag', node=hex(node), tag=name, local=local)
275 self.hook('tag', node=hex(node), tag=name, local=local)
276 return
276 return
277
277
278 try:
278 try:
279 fp = self.wfile('.hgtags', 'rb+')
279 fp = self.wfile('.hgtags', 'rb+')
280 except IOError:
280 except IOError:
281 fp = self.wfile('.hgtags', 'ab')
281 fp = self.wfile('.hgtags', 'ab')
282 else:
282 else:
283 prevtags = fp.read()
283 prevtags = fp.read()
284
284
285 # committed tags are stored in UTF-8
285 # committed tags are stored in UTF-8
286 writetags(fp, names, encoding.fromlocal, prevtags)
286 writetags(fp, names, encoding.fromlocal, prevtags)
287
287
288 fp.close()
288 fp.close()
289
289
290 if '.hgtags' not in self.dirstate:
290 if '.hgtags' not in self.dirstate:
291 self[None].add(['.hgtags'])
291 self[None].add(['.hgtags'])
292
292
293 m = matchmod.exact(self.root, '', ['.hgtags'])
293 m = matchmod.exact(self.root, '', ['.hgtags'])
294 tagnode = self.commit(message, user, date, extra=extra, match=m)
294 tagnode = self.commit(message, user, date, extra=extra, match=m)
295
295
296 for name in names:
296 for name in names:
297 self.hook('tag', node=hex(node), tag=name, local=local)
297 self.hook('tag', node=hex(node), tag=name, local=local)
298
298
299 return tagnode
299 return tagnode
300
300
301 def tag(self, names, node, message, local, user, date):
301 def tag(self, names, node, message, local, user, date):
302 '''tag a revision with one or more symbolic names.
302 '''tag a revision with one or more symbolic names.
303
303
304 names is a list of strings or, when adding a single tag, names may be a
304 names is a list of strings or, when adding a single tag, names may be a
305 string.
305 string.
306
306
307 if local is True, the tags are stored in a per-repository file.
307 if local is True, the tags are stored in a per-repository file.
308 otherwise, they are stored in the .hgtags file, and a new
308 otherwise, they are stored in the .hgtags file, and a new
309 changeset is committed with the change.
309 changeset is committed with the change.
310
310
311 keyword arguments:
311 keyword arguments:
312
312
313 local: whether to store tags in non-version-controlled file
313 local: whether to store tags in non-version-controlled file
314 (default False)
314 (default False)
315
315
316 message: commit message to use if committing
316 message: commit message to use if committing
317
317
318 user: name of user to use if committing
318 user: name of user to use if committing
319
319
320 date: date tuple to use if committing'''
320 date: date tuple to use if committing'''
321
321
322 if not local:
322 if not local:
323 for x in self.status()[:5]:
323 for x in self.status()[:5]:
324 if '.hgtags' in x:
324 if '.hgtags' in x:
325 raise util.Abort(_('working copy of .hgtags is changed '
325 raise util.Abort(_('working copy of .hgtags is changed '
326 '(please commit .hgtags manually)'))
326 '(please commit .hgtags manually)'))
327
327
328 self.tags() # instantiate the cache
328 self.tags() # instantiate the cache
329 self._tag(names, node, message, local, user, date)
329 self._tag(names, node, message, local, user, date)
330
330
331 def tags(self):
331 def tags(self):
332 '''return a mapping of tag to node'''
332 '''return a mapping of tag to node'''
333 if self._tags is None:
333 if self._tags is None:
334 (self._tags, self._tagtypes) = self._findtags()
334 (self._tags, self._tagtypes) = self._findtags()
335
335
336 return self._tags
336 return self._tags
337
337
338 def _findtags(self):
338 def _findtags(self):
339 '''Do the hard work of finding tags. Return a pair of dicts
339 '''Do the hard work of finding tags. Return a pair of dicts
340 (tags, tagtypes) where tags maps tag name to node, and tagtypes
340 (tags, tagtypes) where tags maps tag name to node, and tagtypes
341 maps tag name to a string like \'global\' or \'local\'.
341 maps tag name to a string like \'global\' or \'local\'.
342 Subclasses or extensions are free to add their own tags, but
342 Subclasses or extensions are free to add their own tags, but
343 should be aware that the returned dicts will be retained for the
343 should be aware that the returned dicts will be retained for the
344 duration of the localrepo object.'''
344 duration of the localrepo object.'''
345
345
346 # XXX what tagtype should subclasses/extensions use? Currently
346 # XXX what tagtype should subclasses/extensions use? Currently
347 # mq and bookmarks add tags, but do not set the tagtype at all.
347 # mq and bookmarks add tags, but do not set the tagtype at all.
348 # Should each extension invent its own tag type? Should there
348 # Should each extension invent its own tag type? Should there
349 # be one tagtype for all such "virtual" tags? Or is the status
349 # be one tagtype for all such "virtual" tags? Or is the status
350 # quo fine?
350 # quo fine?
351
351
352 alltags = {} # map tag name to (node, hist)
352 alltags = {} # map tag name to (node, hist)
353 tagtypes = {}
353 tagtypes = {}
354
354
355 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
355 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
356 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
356 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
357
357
358 # Build the return dicts. Have to re-encode tag names because
358 # Build the return dicts. Have to re-encode tag names because
359 # the tags module always uses UTF-8 (in order not to lose info
359 # the tags module always uses UTF-8 (in order not to lose info
360 # writing to the cache), but the rest of Mercurial wants them in
360 # writing to the cache), but the rest of Mercurial wants them in
361 # local encoding.
361 # local encoding.
362 tags = {}
362 tags = {}
363 for (name, (node, hist)) in alltags.iteritems():
363 for (name, (node, hist)) in alltags.iteritems():
364 if node != nullid:
364 if node != nullid:
365 try:
365 try:
366 # ignore tags to unknown nodes
366 # ignore tags to unknown nodes
367 self.changelog.lookup(node)
367 self.changelog.lookup(node)
368 tags[encoding.tolocal(name)] = node
368 tags[encoding.tolocal(name)] = node
369 except error.LookupError:
369 except error.LookupError:
370 pass
370 pass
371 tags['tip'] = self.changelog.tip()
371 tags['tip'] = self.changelog.tip()
372 tagtypes = dict([(encoding.tolocal(name), value)
372 tagtypes = dict([(encoding.tolocal(name), value)
373 for (name, value) in tagtypes.iteritems()])
373 for (name, value) in tagtypes.iteritems()])
374 return (tags, tagtypes)
374 return (tags, tagtypes)
375
375
376 def tagtype(self, tagname):
376 def tagtype(self, tagname):
377 '''
377 '''
378 return the type of the given tag. result can be:
378 return the type of the given tag. result can be:
379
379
380 'local' : a local tag
380 'local' : a local tag
381 'global' : a global tag
381 'global' : a global tag
382 None : tag does not exist
382 None : tag does not exist
383 '''
383 '''
384
384
385 self.tags()
385 self.tags()
386
386
387 return self._tagtypes.get(tagname)
387 return self._tagtypes.get(tagname)
388
388
389 def tagslist(self):
389 def tagslist(self):
390 '''return a list of tags ordered by revision'''
390 '''return a list of tags ordered by revision'''
391 l = []
391 l = []
392 for t, n in self.tags().iteritems():
392 for t, n in self.tags().iteritems():
393 r = self.changelog.rev(n)
393 r = self.changelog.rev(n)
394 l.append((r, t, n))
394 l.append((r, t, n))
395 return [(t, n) for r, t, n in sorted(l)]
395 return [(t, n) for r, t, n in sorted(l)]
396
396
397 def nodetags(self, node):
397 def nodetags(self, node):
398 '''return the tags associated with a node'''
398 '''return the tags associated with a node'''
399 if not self.nodetagscache:
399 if not self.nodetagscache:
400 self.nodetagscache = {}
400 self.nodetagscache = {}
401 for t, n in self.tags().iteritems():
401 for t, n in self.tags().iteritems():
402 self.nodetagscache.setdefault(n, []).append(t)
402 self.nodetagscache.setdefault(n, []).append(t)
403 for tags in self.nodetagscache.itervalues():
403 for tags in self.nodetagscache.itervalues():
404 tags.sort()
404 tags.sort()
405 return self.nodetagscache.get(node, [])
405 return self.nodetagscache.get(node, [])
406
406
407 def nodebookmarks(self, node):
407 def nodebookmarks(self, node):
408 marks = []
408 marks = []
409 for bookmark, n in self._bookmarks.iteritems():
409 for bookmark, n in self._bookmarks.iteritems():
410 if n == node:
410 if n == node:
411 marks.append(bookmark)
411 marks.append(bookmark)
412 return sorted(marks)
412 return sorted(marks)
413
413
414 def _branchtags(self, partial, lrev):
414 def _branchtags(self, partial, lrev):
415 # TODO: rename this function?
415 # TODO: rename this function?
416 tiprev = len(self) - 1
416 tiprev = len(self) - 1
417 if lrev != tiprev:
417 if lrev != tiprev:
418 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
418 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
419 self._updatebranchcache(partial, ctxgen)
419 self._updatebranchcache(partial, ctxgen)
420 self._writebranchcache(partial, self.changelog.tip(), tiprev)
420 self._writebranchcache(partial, self.changelog.tip(), tiprev)
421
421
422 return partial
422 return partial
423
423
424 def updatebranchcache(self):
424 def updatebranchcache(self):
425 tip = self.changelog.tip()
425 tip = self.changelog.tip()
426 if self._branchcache is not None and self._branchcachetip == tip:
426 if self._branchcache is not None and self._branchcachetip == tip:
427 return self._branchcache
427 return self._branchcache
428
428
429 oldtip = self._branchcachetip
429 oldtip = self._branchcachetip
430 self._branchcachetip = tip
430 self._branchcachetip = tip
431 if oldtip is None or oldtip not in self.changelog.nodemap:
431 if oldtip is None or oldtip not in self.changelog.nodemap:
432 partial, last, lrev = self._readbranchcache()
432 partial, last, lrev = self._readbranchcache()
433 else:
433 else:
434 lrev = self.changelog.rev(oldtip)
434 lrev = self.changelog.rev(oldtip)
435 partial = self._branchcache
435 partial = self._branchcache
436
436
437 self._branchtags(partial, lrev)
437 self._branchtags(partial, lrev)
438 # this private cache holds all heads (not just tips)
438 # this private cache holds all heads (not just tips)
439 self._branchcache = partial
439 self._branchcache = partial
440
440
441 def branchmap(self):
441 def branchmap(self):
442 '''returns a dictionary {branch: [branchheads]}'''
442 '''returns a dictionary {branch: [branchheads]}'''
443 self.updatebranchcache()
443 self.updatebranchcache()
444 return self._branchcache
444 return self._branchcache
445
445
446 def branchtags(self):
446 def branchtags(self):
447 '''return a dict where branch names map to the tipmost head of
447 '''return a dict where branch names map to the tipmost head of
448 the branch, open heads come before closed'''
448 the branch, open heads come before closed'''
449 bt = {}
449 bt = {}
450 for bn, heads in self.branchmap().iteritems():
450 for bn, heads in self.branchmap().iteritems():
451 tip = heads[-1]
451 tip = heads[-1]
452 for h in reversed(heads):
452 for h in reversed(heads):
453 if 'close' not in self.changelog.read(h)[5]:
453 if 'close' not in self.changelog.read(h)[5]:
454 tip = h
454 tip = h
455 break
455 break
456 bt[bn] = tip
456 bt[bn] = tip
457 return bt
457 return bt
458
458
459 def _readbranchcache(self):
459 def _readbranchcache(self):
460 partial = {}
460 partial = {}
461 try:
461 try:
462 f = self.opener("cache/branchheads")
462 f = self.opener("cache/branchheads")
463 lines = f.read().split('\n')
463 lines = f.read().split('\n')
464 f.close()
464 f.close()
465 except (IOError, OSError):
465 except (IOError, OSError):
466 return {}, nullid, nullrev
466 return {}, nullid, nullrev
467
467
468 try:
468 try:
469 last, lrev = lines.pop(0).split(" ", 1)
469 last, lrev = lines.pop(0).split(" ", 1)
470 last, lrev = bin(last), int(lrev)
470 last, lrev = bin(last), int(lrev)
471 if lrev >= len(self) or self[lrev].node() != last:
471 if lrev >= len(self) or self[lrev].node() != last:
472 # invalidate the cache
472 # invalidate the cache
473 raise ValueError('invalidating branch cache (tip differs)')
473 raise ValueError('invalidating branch cache (tip differs)')
474 for l in lines:
474 for l in lines:
475 if not l:
475 if not l:
476 continue
476 continue
477 node, label = l.split(" ", 1)
477 node, label = l.split(" ", 1)
478 label = encoding.tolocal(label.strip())
478 label = encoding.tolocal(label.strip())
479 partial.setdefault(label, []).append(bin(node))
479 partial.setdefault(label, []).append(bin(node))
480 except KeyboardInterrupt:
480 except KeyboardInterrupt:
481 raise
481 raise
482 except Exception, inst:
482 except Exception, inst:
483 if self.ui.debugflag:
483 if self.ui.debugflag:
484 self.ui.warn(str(inst), '\n')
484 self.ui.warn(str(inst), '\n')
485 partial, last, lrev = {}, nullid, nullrev
485 partial, last, lrev = {}, nullid, nullrev
486 return partial, last, lrev
486 return partial, last, lrev
487
487
488 def _writebranchcache(self, branches, tip, tiprev):
488 def _writebranchcache(self, branches, tip, tiprev):
489 try:
489 try:
490 f = self.opener("cache/branchheads", "w", atomictemp=True)
490 f = self.opener("cache/branchheads", "w", atomictemp=True)
491 f.write("%s %s\n" % (hex(tip), tiprev))
491 f.write("%s %s\n" % (hex(tip), tiprev))
492 for label, nodes in branches.iteritems():
492 for label, nodes in branches.iteritems():
493 for node in nodes:
493 for node in nodes:
494 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
494 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
495 f.rename()
495 f.rename()
496 except (IOError, OSError):
496 except (IOError, OSError):
497 pass
497 pass
498
498
    def _updatebranchcache(self, partial, ctxgen):
        """Update the branch -> heads mapping *partial* in place with
        the changesets yielded by *ctxgen*.

        New nodes are appended to their branch's entry; older entries
        that become reachable from a newer node on the same branch are
        pruned so only real heads remain.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                # zero or one candidate: nothing can be pruned
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    # already pruned by an earlier reachability walk
                    continue
                # bound the walk at the lowest-revision surviving head
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                # 'latest' reaches itself; keep it as a head
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
522
522
523 def lookup(self, key):
523 def lookup(self, key):
524 if isinstance(key, int):
524 if isinstance(key, int):
525 return self.changelog.node(key)
525 return self.changelog.node(key)
526 elif key == '.':
526 elif key == '.':
527 return self.dirstate.p1()
527 return self.dirstate.p1()
528 elif key == 'null':
528 elif key == 'null':
529 return nullid
529 return nullid
530 elif key == 'tip':
530 elif key == 'tip':
531 return self.changelog.tip()
531 return self.changelog.tip()
532 n = self.changelog._match(key)
532 n = self.changelog._match(key)
533 if n:
533 if n:
534 return n
534 return n
535 if key in self._bookmarks:
535 if key in self._bookmarks:
536 return self._bookmarks[key]
536 return self._bookmarks[key]
537 if key in self.tags():
537 if key in self.tags():
538 return self.tags()[key]
538 return self.tags()[key]
539 if key in self.branchtags():
539 if key in self.branchtags():
540 return self.branchtags()[key]
540 return self.branchtags()[key]
541 n = self.changelog._partialmatch(key)
541 n = self.changelog._partialmatch(key)
542 if n:
542 if n:
543 return n
543 return n
544
544
545 # can't find key, check if it might have come from damaged dirstate
545 # can't find key, check if it might have come from damaged dirstate
546 if key in self.dirstate.parents():
546 if key in self.dirstate.parents():
547 raise error.Abort(_("working directory has unknown parent '%s'!")
547 raise error.Abort(_("working directory has unknown parent '%s'!")
548 % short(key))
548 % short(key))
549 try:
549 try:
550 if len(key) == 20:
550 if len(key) == 20:
551 key = hex(key)
551 key = hex(key)
552 except:
552 except:
553 pass
553 pass
554 raise error.RepoLookupError(_("unknown revision '%s'") % key)
554 raise error.RepoLookupError(_("unknown revision '%s'") % key)
555
555
556 def lookupbranch(self, key, remote=None):
556 def lookupbranch(self, key, remote=None):
557 repo = remote or self
557 repo = remote or self
558 if key in repo.branchmap():
558 if key in repo.branchmap():
559 return key
559 return key
560
560
561 repo = (remote and remote.local()) and remote or self
561 repo = (remote and remote.local()) and remote or self
562 return repo[key].branch()
562 return repo[key].branch()
563
563
564 def known(self, nodes):
564 def known(self, nodes):
565 nm = self.changelog.nodemap
565 nm = self.changelog.nodemap
566 return [(n in nm) for n in nodes]
566 return [(n in nm) for n in nodes]
567
567
568 def local(self):
568 def local(self):
569 return True
569 return True
570
570
571 def join(self, f):
571 def join(self, f):
572 return os.path.join(self.path, f)
572 return os.path.join(self.path, f)
573
573
574 def wjoin(self, f):
574 def wjoin(self, f):
575 return os.path.join(self.root, f)
575 return os.path.join(self.root, f)
576
576
577 def file(self, f):
577 def file(self, f):
578 if f[0] == '/':
578 if f[0] == '/':
579 f = f[1:]
579 f = f[1:]
580 return filelog.filelog(self.sopener, f)
580 return filelog.filelog(self.sopener, f)
581
581
582 def changectx(self, changeid):
582 def changectx(self, changeid):
583 return self[changeid]
583 return self[changeid]
584
584
585 def parents(self, changeid=None):
585 def parents(self, changeid=None):
586 '''get list of changectxs for parents of changeid'''
586 '''get list of changectxs for parents of changeid'''
587 return self[changeid].parents()
587 return self[changeid].parents()
588
588
589 def filectx(self, path, changeid=None, fileid=None):
589 def filectx(self, path, changeid=None, fileid=None):
590 """changeid can be a changeset revision, node, or tag.
590 """changeid can be a changeset revision, node, or tag.
591 fileid can be a file revision or node."""
591 fileid can be a file revision or node."""
592 return context.filectx(self, path, changeid, fileid)
592 return context.filectx(self, path, changeid, fileid)
593
593
594 def getcwd(self):
594 def getcwd(self):
595 return self.dirstate.getcwd()
595 return self.dirstate.getcwd()
596
596
597 def pathto(self, f, cwd=None):
597 def pathto(self, f, cwd=None):
598 return self.dirstate.pathto(f, cwd)
598 return self.dirstate.pathto(f, cwd)
599
599
600 def wfile(self, f, mode='r'):
600 def wfile(self, f, mode='r'):
601 return self.wopener(f, mode)
601 return self.wopener(f, mode)
602
602
603 def _link(self, f):
603 def _link(self, f):
604 return os.path.islink(self.wjoin(f))
604 return os.path.islink(self.wjoin(f))
605
605
    def _loadfilter(self, filter):
        """Return (and cache) the (matcher, filterfn, params) triples
        configured in the *filter* config section ('encode'/'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command prefix matching a registered data filter
                # selects that filter; the rest of the line is its params
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise run the whole command as a shell pipe
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
629
629
630 def _filter(self, filterpats, filename, data):
630 def _filter(self, filterpats, filename, data):
631 for mf, fn, cmd in filterpats:
631 for mf, fn, cmd in filterpats:
632 if mf(filename):
632 if mf(filename):
633 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
633 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
634 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
634 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
635 break
635 break
636
636
637 return data
637 return data
638
638
    @propertycache
    def _encodefilterpats(self):
        # lazily-built [encode] filter list, applied when reading
        # working-directory files (see wread)
        return self._loadfilter('encode')
642
642
    @propertycache
    def _decodefilterpats(self):
        # lazily-built [decode] filter list, applied when writing
        # working-directory files (see wwrite/wwritedata)
        return self._loadfilter('decode')
646
646
647 def adddatafilter(self, name, filter):
647 def adddatafilter(self, name, filter):
648 self._datafilters[name] = filter
648 self._datafilters[name] = filter
649
649
650 def wread(self, filename):
650 def wread(self, filename):
651 if self._link(filename):
651 if self._link(filename):
652 data = os.readlink(self.wjoin(filename))
652 data = os.readlink(self.wjoin(filename))
653 else:
653 else:
654 data = self.wopener(filename, 'r').read()
654 data = self.wopener(filename, 'r').read()
655 return self._filter(self._encodefilterpats, filename, data)
655 return self._filter(self._encodefilterpats, filename, data)
656
656
    def wwrite(self, filename, data, flags):
        """Write *data* to working-directory *filename* after applying
        the decode filters.

        flags: 'l' creates a symlink whose target is the (filtered)
        data; 'x' marks a regular file executable.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
665
665
666 def wwritedata(self, filename, data):
666 def wwritedata(self, filename, data):
667 return self._filter(self._decodefilterpats, filename, data)
667 return self._filter(self._decodefilterpats, filename, data)
668
668
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by *desc*.

        Dirstate, branch and a description are journalled first so a
        later rollback can restore working-directory state too. Returns
        the (weakly referenced) transaction object.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # join the already-active transaction
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # on successful close, journal.* files are renamed to undo.*
        # so that a later rollback can find them
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weak ref: the transaction dies with its last strong reference
        self._transref = weakref.ref(tr)
        return tr
699
699
    def recover(self):
        """Undo an interrupted transaction using its journal.

        Returns True if a journal was found and rolled back, False
        otherwise. Takes the store lock for the duration.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # cached store data is stale after the rollback
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
714
714
    def rollback(self, dryrun=False):
        """Roll the repository back to the state saved in undo.*.

        With dryrun=True only the rollback description is printed.
        Returns 1 (and warns) when no rollback information exists.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc: "<len(repo)>\n<command>\n[<extra>...]"
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                # restore store, dirstate, bookmarks and branch in turn
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % self.dirstate.branch())
                # drop every in-memory cache that may reference the
                # now-stripped revisions
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
764
764
765 def invalidatecaches(self):
765 def invalidatecaches(self):
766 self._tags = None
766 self._tags = None
767 self._tagtypes = None
767 self._tagtypes = None
768 self.nodetagscache = None
768 self.nodetagscache = None
769 self._branchcache = None # in UTF-8
769 self._branchcache = None # in UTF-8
770 self._branchcachetip = None
770 self._branchcachetip = None
771
771
772 def invalidate(self):
772 def invalidate(self):
773 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
773 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
774 if a in self.__dict__:
774 if a in self.__dict__:
775 delattr(self, a)
775 delattr(self, a)
776 self.invalidatecaches()
776 self.invalidatecaches()
777
777
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* and return the lock object.

        If the lock is held and *wait* is true, retry with the
        ui.timeout deadline after warning the user; otherwise the
        LockHeld error propagates. releasefn/acquirefn run on release
        and after acquisition; *desc* is used in user-facing messages.
        """
        try:
            # first attempt: fail immediately if already held
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
792
792
793 def lock(self, wait=True):
793 def lock(self, wait=True):
794 '''Lock the repository store (.hg/store) and return a weak reference
794 '''Lock the repository store (.hg/store) and return a weak reference
795 to the lock. Use this before modifying the store (e.g. committing or
795 to the lock. Use this before modifying the store (e.g. committing or
796 stripping). If you are opening a transaction, get a lock as well.)'''
796 stripping). If you are opening a transaction, get a lock as well.)'''
797 l = self._lockref and self._lockref()
797 l = self._lockref and self._lockref()
798 if l is not None and l.held:
798 if l is not None and l.held:
799 l.lock()
799 l.lock()
800 return l
800 return l
801
801
802 l = self._lock(self.sjoin("lock"), wait, self.store.write,
802 l = self._lock(self.sjoin("lock"), wait, self.store.write,
803 self.invalidate, _('repository %s') % self.origroot)
803 self.invalidate, _('repository %s') % self.origroot)
804 self._lockref = weakref.ref(l)
804 self._lockref = weakref.ref(l)
805 return l
805 return l
806
806
807 def wlock(self, wait=True):
807 def wlock(self, wait=True):
808 '''Lock the non-store parts of the repository (everything under
808 '''Lock the non-store parts of the repository (everything under
809 .hg except .hg/store) and return a weak reference to the lock.
809 .hg except .hg/store) and return a weak reference to the lock.
810 Use this before modifying files in .hg.'''
810 Use this before modifying files in .hg.'''
811 l = self._wlockref and self._wlockref()
811 l = self._wlockref and self._wlockref()
812 if l is not None and l.held:
812 if l is not None and l.held:
813 l.lock()
813 l.lock()
814 return l
814 return l
815
815
816 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
816 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
817 self.dirstate.invalidate, _('working directory of %s') %
817 self.dirstate.invalidate, _('working directory of %s') %
818 self.origroot)
818 self.origroot)
819 self._wlockref = weakref.ref(l)
819 self._wlockref = weakref.ref(l)
820 return l
820 return l
821
821
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the filectx to commit; manifest1/manifest2: the parents'
        manifests; linkrev: changelog revision the new filelog entry
        links to; tr: the open transaction; changelist: list mutated in
        place with the names of files that actually changed.

        Returns the new filelog node, or the unchanged first-parent node
        when no new filelog entry is needed.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
901
901
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset's node, or None when there is nothing
        to commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories and abort on bad files so that
            # explicit patterns can be validated below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing modified/added/removed and no branch change:
            # nothing to commit
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # tell the user where the edited message was saved
                # before re-raising
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1036
1036
1037 def commitctx(self, ctx, error=False):
1037 def commitctx(self, ctx, error=False):
1038 """Add a new revision to current repository.
1038 """Add a new revision to current repository.
1039 Revision information is passed via the context argument.
1039 Revision information is passed via the context argument.
1040 """
1040 """
1041
1041
1042 tr = lock = None
1042 tr = lock = None
1043 removed = list(ctx.removed())
1043 removed = list(ctx.removed())
1044 p1, p2 = ctx.p1(), ctx.p2()
1044 p1, p2 = ctx.p1(), ctx.p2()
1045 m1 = p1.manifest().copy()
1045 m1 = p1.manifest().copy()
1046 m2 = p2.manifest()
1046 m2 = p2.manifest()
1047 user = ctx.user()
1047 user = ctx.user()
1048
1048
1049 lock = self.lock()
1049 lock = self.lock()
1050 try:
1050 try:
1051 tr = self.transaction("commit")
1051 tr = self.transaction("commit")
1052 trp = weakref.proxy(tr)
1052 trp = weakref.proxy(tr)
1053
1053
1054 # check in files
1054 # check in files
1055 new = {}
1055 new = {}
1056 changed = []
1056 changed = []
1057 linkrev = len(self)
1057 linkrev = len(self)
1058 for f in sorted(ctx.modified() + ctx.added()):
1058 for f in sorted(ctx.modified() + ctx.added()):
1059 self.ui.note(f + "\n")
1059 self.ui.note(f + "\n")
1060 try:
1060 try:
1061 fctx = ctx[f]
1061 fctx = ctx[f]
1062 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1062 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1063 changed)
1063 changed)
1064 m1.set(f, fctx.flags())
1064 m1.set(f, fctx.flags())
1065 except OSError, inst:
1065 except OSError, inst:
1066 self.ui.warn(_("trouble committing %s!\n") % f)
1066 self.ui.warn(_("trouble committing %s!\n") % f)
1067 raise
1067 raise
1068 except IOError, inst:
1068 except IOError, inst:
1069 errcode = getattr(inst, 'errno', errno.ENOENT)
1069 errcode = getattr(inst, 'errno', errno.ENOENT)
1070 if error or errcode and errcode != errno.ENOENT:
1070 if error or errcode and errcode != errno.ENOENT:
1071 self.ui.warn(_("trouble committing %s!\n") % f)
1071 self.ui.warn(_("trouble committing %s!\n") % f)
1072 raise
1072 raise
1073 else:
1073 else:
1074 removed.append(f)
1074 removed.append(f)
1075
1075
1076 # update manifest
1076 # update manifest
1077 m1.update(new)
1077 m1.update(new)
1078 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1078 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1079 drop = [f for f in removed if f in m1]
1079 drop = [f for f in removed if f in m1]
1080 for f in drop:
1080 for f in drop:
1081 del m1[f]
1081 del m1[f]
1082 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1082 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1083 p2.manifestnode(), (new, drop))
1083 p2.manifestnode(), (new, drop))
1084
1084
1085 # update changelog
1085 # update changelog
1086 self.changelog.delayupdate()
1086 self.changelog.delayupdate()
1087 n = self.changelog.add(mn, changed + removed, ctx.description(),
1087 n = self.changelog.add(mn, changed + removed, ctx.description(),
1088 trp, p1.node(), p2.node(),
1088 trp, p1.node(), p2.node(),
1089 user, ctx.date(), ctx.extra().copy())
1089 user, ctx.date(), ctx.extra().copy())
1090 p = lambda: self.changelog.writepending() and self.root or ""
1090 p = lambda: self.changelog.writepending() and self.root or ""
1091 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1091 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1092 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1092 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1093 parent2=xp2, pending=p)
1093 parent2=xp2, pending=p)
1094 self.changelog.finalize(trp)
1094 self.changelog.finalize(trp)
1095 tr.close()
1095 tr.close()
1096
1096
1097 if self._branchcache:
1097 if self._branchcache:
1098 self.updatebranchcache()
1098 self.updatebranchcache()
1099 return n
1099 return n
1100 finally:
1100 finally:
1101 if tr:
1101 if tr:
1102 tr.release()
1102 tr.release()
1103 lock.release()
1103 lock.release()
1104
1104
def destroyed(self):
    '''Inform the repository that nodes have been destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done after destroying history.'''
    # XXX it might be nice if we could take the list of destroyed
    # nodes, but I don't see an easy way for rollback() to do that

    # Ensure the persistent tag cache is updated. Doing it now
    # means that the tag cache only has to worry about destroyed
    # heads immediately after a strip/rollback. That in turn
    # guarantees that "cachetip == currenttip" (comparing both rev
    # and node) always means no nodes have been added or destroyed.

    # XXX this is suboptimal when qrefresh'ing: we strip the current
    # head, refresh the tag cache, then immediately add a new head.
    # But I think doing it this way is necessary for the "instant
    # tag cache retrieval" case to work.
    self.invalidatecaches()
1123
1123
def walk(self, match, node=None):
    '''Walk recursively through the directory tree or a given
    changeset, yielding all files matched by the match function.

    node=None (the default) walks the working directory context.
    '''
    ctx = self[node]
    return ctx.walk(match)
1131
1131
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False,
           listsubrepos=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted lists:
    (modified, added, removed, deleted, unknown, ignored, clean)
    """

    def mfmatches(ctx):
        # manifest restricted to files accepted by the match function
        mf = ctx.manifest().copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self['.']
    match = match or matchmod.always(self.root, self.getcwd())
    listignored, listclean, listunknown = ignored, clean, unknown

    # load earliest manifest first for caching reasons
    if not working and ctx2.rev() < ctx1.rev():
        ctx2.manifest()

    if not parentworking:
        def bad(f, msg):
            if f not in ctx1:
                self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
        match.bad = bad

    if working: # we need to scan the working dir
        subrepos = []
        if '.hgsub' in self.dirstate:
            subrepos = ctx1.substate.keys()
        s = self.dirstate.status(match, subrepos, listignored,
                                 listclean, listunknown)
        cmp, modified, added, removed, deleted, unknown, ignored, clean = s

        # check for any possibly clean files
        if parentworking and cmp:
            fixup = []
            # do a full compare of any files that might have changed
            for f in sorted(cmp):
                if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                    or ctx1[f].cmp(ctx2[f])):
                    modified.append(f)
                else:
                    fixup.append(f)

            # update dirstate for files that are actually clean
            if fixup:
                if listclean:
                    clean += fixup

                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                    try:
                        for f in fixup:
                            self.dirstate.normal(f)
                    finally:
                        wlock.release()
                except error.LockError:
                    pass

    if not parentworking:
        mf1 = mfmatches(ctx1)
        if working:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self['.'])
            for f in cmp + modified + added:
                mf2[f] = None
                mf2.set(f, ctx2.flags(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(ctx2)

        modified, added, clean = [], [], []
        for fn in mf2:
            if fn in mf1:
                # a file deleted from the working dir must not be
                # reported as modified, even if its manifest entry
                # differs (issue2761)
                if (fn not in deleted and
                    (mf1.flags(fn) != mf2.flags(fn) or
                     (mf1[fn] != mf2[fn] and
                      (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                del mf1[fn]
            elif fn not in deleted:
                # likewise, deleted files are not 'added' (issue2761)
                added.append(fn)
        removed = mf1.keys()

    r = modified, added, removed, deleted, unknown, ignored, clean

    if listsubrepos:
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            if working:
                rev2 = None
            else:
                rev2 = ctx2.substate[subpath][1]
            try:
                submatch = matchmod.narrowmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # prefix subrepo results with their path and merge them in
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
            except error.LookupError:
                self.ui.status(_("skipping missing subrepository: %s\n")
                               % subpath)

    for l in r:
        l.sort()
    return r
1261
1262
def heads(self, start=None):
    """Return the repository heads, optionally limited to descendants
    of start, ordered by revision number, newest first."""
    hs = self.changelog.heads(start)
    # the changelog returns heads unordered; present newest first
    return sorted(hs, key=self.changelog.rev, reverse=True)
1266
1267
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    branches = self.branchmap()
    if branch not in branches:
        return []
    # the cache returns heads ordered lowest to highest
    bheads = list(reversed(branches[branch]))
    if start is not None:
        # filter out the heads that cannot be reached from startrev
        fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
        bheads = [h for h in bheads if h in fbheads]
    if not closed:
        # drop heads whose changeset extra marks the branch closed
        bheads = [h for h in bheads
                  if 'close' not in self.changelog.read(h)[5]]
    return bheads
1290
1291
def branches(self, nodes):
    """For each node, follow first parents down to a merge or a root.

    Returns a list of (tipmost, stop, p0, p1) tuples: the walk from
    tipmost stopped at 'stop' because it is a merge (p1 != nullid)
    or a root (p0 == nullid). Empty input defaults to the tip.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    out = []
    for start in nodes:
        n = start
        while True:
            p = self.changelog.parents(n)
            if p[1] != nullid or p[0] == nullid:
                out.append((start, n, p[0], p[1]))
                break
            n = p[0]
    return out
1304
1305
def between(self, pairs):
    """For each (top, bottom) pair, sample nodes along the
    first-parent chain from top towards bottom.

    Samples are taken at exponentially growing step indices
    (1, 2, 4, ...); one list of samples is returned per pair.
    """
    result = []
    for top, bottom in pairs:
        samples = []
        n, i, f = top, 0, 1
        while n != bottom and n != nullid:
            p = self.changelog.parents(n)[0]
            if i == f:
                samples.append(n)
                f *= 2
            n = p
            i += 1
        result.append(samples)
    return result
1323
1324
def pull(self, remote, heads=None, force=False):
    """Pull changes from remote, optionally limited to heads.

    Returns the integer result of addchangegroup(), or 0 when there
    is nothing to pull.
    """
    lock = self.lock()
    try:
        usecommon = remote.capable('getbundle')
        tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                           force=force, commononly=usecommon)
        common, fetch, rheads = tmp
        if not fetch:
            self.ui.status(_("no changes found\n"))
            result = 0
        else:
            if heads is None and list(common) == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if usecommon:
                cg = remote.getbundle('pull', common=common,
                                      heads=heads or rheads)
            elif heads is None:
                cg = remote.changegroup(fetch, 'pull')
            elif not remote.capable('changegroupsubset'):
                raise util.Abort(_("partial pull cannot be done because "
                                   "other repository doesn't support "
                                   "changegroupsubset."))
            else:
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            result = self.addchangegroup(cg, 'pull', remote.url(),
                                         lock=lock)
    finally:
        lock.release()

    return result
1358
1359
def checkpush(self, force, revs):
    """Extensions can override this function if additional checks have
    to be performed before pushing, or call it if they override push
    command.
    """
    # intentionally a no-op in the base repository class
    pass
1365
1366
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
      - 0 means HTTP error *or* nothing to push
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    self.checkpush(force, revs)
    lock = None
    unbundle = remote.capable('unbundle')
    if not unbundle:
        lock = remote.lock()
    try:
        cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                             newbranch)
        ret = remote_heads
        if cg is not None:
            if unbundle:
                # local repo finds heads on server, finds out what
                # revs it must push. once revs transferred, if server
                # finds it has different heads (someone else won
                # commit/push race), server aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                ret = remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                ret = remote.addchangegroup(cg, 'push', self.url(),
                                            lock=lock)
    finally:
        if lock is not None:
            lock.release()

    # mirror local bookmark moves to the remote where fast-forwardable
    self.ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    for k in rb.keys():
        if k in self._bookmarks:
            nr, nl = rb[k], hex(self._bookmarks[k])
            if nr in self:
                cr = self[nr]
                cl = self[nl]
                # only push the bookmark if it moves forward in history
                if cl in cr.descendants():
                    r = remote.pushkey('bookmarks', k, nr, nl)
                    if r:
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_('updating bookmark %s'
                                       ' failed!\n') % k)

    return ret
1427
1428
def changegroupinfo(self, nodes, source):
    """Report the size (and, when debugging, content) of a changegroup."""
    if self.ui.verbose or source == 'bundle':
        self.ui.status(_("%d changesets found\n") % len(nodes))
    if self.ui.debugflag:
        self.ui.debug("list of changesets:\n")
        for node in nodes:
            self.ui.debug("%s\n" % hex(node))
1435
1436
def changegroupsubset(self, bases, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendents of any of the bases and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = self.changelog
    if not bases:
        bases = [nullid]
    csets, bases, heads = cl.nodesbetween(bases, heads)
    # We assume that all ancestors of bases are known
    common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
    return self._changegroupsubset(common, csets, heads, source)
1456
1457
def getbundle(self, source, heads=None, common=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    cl = self.changelog
    if common:
        # drop nodes we do not know locally
        nm = cl.nodemap
        common = [n for n in common if n in nm]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    common, missing = cl.findcommonmissing(common, heads)
    return self._changegroupsubset(common, missing, heads, source)
1476
1477
1477 def _changegroupsubset(self, commonrevs, csets, heads, source):
1478 def _changegroupsubset(self, commonrevs, csets, heads, source):
1478
1479
1479 cl = self.changelog
1480 cl = self.changelog
1480 mf = self.manifest
1481 mf = self.manifest
1481 mfs = {} # needed manifests
1482 mfs = {} # needed manifests
1482 fnodes = {} # needed file nodes
1483 fnodes = {} # needed file nodes
1483 changedfiles = set()
1484 changedfiles = set()
1484 fstate = ['', {}]
1485 fstate = ['', {}]
1485 count = [0]
1486 count = [0]
1486
1487
1487 # can we go through the fast path ?
1488 # can we go through the fast path ?
1488 heads.sort()
1489 heads.sort()
1489 if heads == sorted(self.heads()):
1490 if heads == sorted(self.heads()):
1490 return self._changegroup(csets, source)
1491 return self._changegroup(csets, source)
1491
1492
1492 # slow path
1493 # slow path
1493 self.hook('preoutgoing', throw=True, source=source)
1494 self.hook('preoutgoing', throw=True, source=source)
1494 self.changegroupinfo(csets, source)
1495 self.changegroupinfo(csets, source)
1495
1496
1496 # filter any nodes that claim to be part of the known set
1497 # filter any nodes that claim to be part of the known set
1497 def prune(revlog, missing):
1498 def prune(revlog, missing):
1498 for n in missing:
1499 for n in missing:
1499 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1500 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1500 yield n
1501 yield n
1501
1502
1502 def lookup(revlog, x):
1503 def lookup(revlog, x):
1503 if revlog == cl:
1504 if revlog == cl:
1504 c = cl.read(x)
1505 c = cl.read(x)
1505 changedfiles.update(c[3])
1506 changedfiles.update(c[3])
1506 mfs.setdefault(c[0], x)
1507 mfs.setdefault(c[0], x)
1507 count[0] += 1
1508 count[0] += 1
1508 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1509 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1509 return x
1510 return x
1510 elif revlog == mf:
1511 elif revlog == mf:
1511 clnode = mfs[x]
1512 clnode = mfs[x]
1512 mdata = mf.readfast(x)
1513 mdata = mf.readfast(x)
1513 for f in changedfiles:
1514 for f in changedfiles:
1514 if f in mdata:
1515 if f in mdata:
1515 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1516 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1516 count[0] += 1
1517 count[0] += 1
1517 self.ui.progress(_('bundling'), count[0],
1518 self.ui.progress(_('bundling'), count[0],
1518 unit=_('manifests'), total=len(mfs))
1519 unit=_('manifests'), total=len(mfs))
1519 return mfs[x]
1520 return mfs[x]
1520 else:
1521 else:
1521 self.ui.progress(
1522 self.ui.progress(
1522 _('bundling'), count[0], item=fstate[0],
1523 _('bundling'), count[0], item=fstate[0],
1523 unit=_('files'), total=len(changedfiles))
1524 unit=_('files'), total=len(changedfiles))
1524 return fstate[1][x]
1525 return fstate[1][x]
1525
1526
1526 bundler = changegroup.bundle10(lookup)
1527 bundler = changegroup.bundle10(lookup)
1527
1528
1528 def gengroup():
1529 def gengroup():
1529 # Create a changenode group generator that will call our functions
1530 # Create a changenode group generator that will call our functions
1530 # back to lookup the owning changenode and collect information.
1531 # back to lookup the owning changenode and collect information.
1531 for chunk in cl.group(csets, bundler):
1532 for chunk in cl.group(csets, bundler):
1532 yield chunk
1533 yield chunk
1533 self.ui.progress(_('bundling'), None)
1534 self.ui.progress(_('bundling'), None)
1534
1535
1535 # Create a generator for the manifestnodes that calls our lookup
1536 # Create a generator for the manifestnodes that calls our lookup
1536 # and data collection functions back.
1537 # and data collection functions back.
1537 count[0] = 0
1538 count[0] = 0
1538 for chunk in mf.group(prune(mf, mfs), bundler):
1539 for chunk in mf.group(prune(mf, mfs), bundler):
1539 yield chunk
1540 yield chunk
1540 self.ui.progress(_('bundling'), None)
1541 self.ui.progress(_('bundling'), None)
1541
1542
1542 mfs.clear()
1543 mfs.clear()
1543
1544
1544 # Go through all our files in order sorted by name.
1545 # Go through all our files in order sorted by name.
1545 count[0] = 0
1546 count[0] = 0
1546 for fname in sorted(changedfiles):
1547 for fname in sorted(changedfiles):
1547 filerevlog = self.file(fname)
1548 filerevlog = self.file(fname)
1548 if not len(filerevlog):
1549 if not len(filerevlog):
1549 raise util.Abort(_("empty or missing revlog for %s") % fname)
1550 raise util.Abort(_("empty or missing revlog for %s") % fname)
1550 fstate[0] = fname
1551 fstate[0] = fname
1551 fstate[1] = fnodes.pop(fname, {})
1552 fstate[1] = fnodes.pop(fname, {})
1552 first = True
1553 first = True
1553
1554
1554 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1555 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1555 bundler):
1556 bundler):
1556 if first:
1557 if first:
1557 if chunk == bundler.close():
1558 if chunk == bundler.close():
1558 break
1559 break
1559 count[0] += 1
1560 count[0] += 1
1560 yield bundler.fileheader(fname)
1561 yield bundler.fileheader(fname)
1561 first = False
1562 first = False
1562 yield chunk
1563 yield chunk
1563 # Signal that no more groups are left.
1564 # Signal that no more groups are left.
1564 yield bundler.close()
1565 yield bundler.close()
1565 self.ui.progress(_('bundling'), None)
1566 self.ui.progress(_('bundling'), None)
1566
1567
1567 if csets:
1568 if csets:
1568 self.hook('outgoing', node=hex(csets[0]), source=source)
1569 self.hook('outgoing', node=hex(csets[0]), source=source)
1569
1570
1570 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1571 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1571
1572
1572 def changegroup(self, basenodes, source):
1573 def changegroup(self, basenodes, source):
1573 # to avoid a race we use changegroupsubset() (issue1320)
1574 # to avoid a race we use changegroupsubset() (issue1320)
1574 return self.changegroupsubset(basenodes, self.heads(), source)
1575 return self.changegroupsubset(basenodes, self.heads(), source)
1575
1576
1576 def _changegroup(self, nodes, source):
1577 def _changegroup(self, nodes, source):
1577 """Compute the changegroup of all nodes that we have that a recipient
1578 """Compute the changegroup of all nodes that we have that a recipient
1578 doesn't. Return a chunkbuffer object whose read() method will return
1579 doesn't. Return a chunkbuffer object whose read() method will return
1579 successive changegroup chunks.
1580 successive changegroup chunks.
1580
1581
1581 This is much easier than the previous function as we can assume that
1582 This is much easier than the previous function as we can assume that
1582 the recipient has any changenode we aren't sending them.
1583 the recipient has any changenode we aren't sending them.
1583
1584
1584 nodes is the set of nodes to send"""
1585 nodes is the set of nodes to send"""
1585
1586
1586 cl = self.changelog
1587 cl = self.changelog
1587 mf = self.manifest
1588 mf = self.manifest
1588 mfs = {}
1589 mfs = {}
1589 changedfiles = set()
1590 changedfiles = set()
1590 fstate = ['']
1591 fstate = ['']
1591 count = [0]
1592 count = [0]
1592
1593
1593 self.hook('preoutgoing', throw=True, source=source)
1594 self.hook('preoutgoing', throw=True, source=source)
1594 self.changegroupinfo(nodes, source)
1595 self.changegroupinfo(nodes, source)
1595
1596
1596 revset = set([cl.rev(n) for n in nodes])
1597 revset = set([cl.rev(n) for n in nodes])
1597
1598
1598 def gennodelst(log):
1599 def gennodelst(log):
1599 for r in log:
1600 for r in log:
1600 if log.linkrev(r) in revset:
1601 if log.linkrev(r) in revset:
1601 yield log.node(r)
1602 yield log.node(r)
1602
1603
1603 def lookup(revlog, x):
1604 def lookup(revlog, x):
1604 if revlog == cl:
1605 if revlog == cl:
1605 c = cl.read(x)
1606 c = cl.read(x)
1606 changedfiles.update(c[3])
1607 changedfiles.update(c[3])
1607 mfs.setdefault(c[0], x)
1608 mfs.setdefault(c[0], x)
1608 count[0] += 1
1609 count[0] += 1
1609 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1610 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1610 return x
1611 return x
1611 elif revlog == mf:
1612 elif revlog == mf:
1612 count[0] += 1
1613 count[0] += 1
1613 self.ui.progress(_('bundling'), count[0],
1614 self.ui.progress(_('bundling'), count[0],
1614 unit=_('manifests'), total=len(mfs))
1615 unit=_('manifests'), total=len(mfs))
1615 return cl.node(revlog.linkrev(revlog.rev(x)))
1616 return cl.node(revlog.linkrev(revlog.rev(x)))
1616 else:
1617 else:
1617 self.ui.progress(
1618 self.ui.progress(
1618 _('bundling'), count[0], item=fstate[0],
1619 _('bundling'), count[0], item=fstate[0],
1619 total=len(changedfiles), unit=_('files'))
1620 total=len(changedfiles), unit=_('files'))
1620 return cl.node(revlog.linkrev(revlog.rev(x)))
1621 return cl.node(revlog.linkrev(revlog.rev(x)))
1621
1622
1622 bundler = changegroup.bundle10(lookup)
1623 bundler = changegroup.bundle10(lookup)
1623
1624
1624 def gengroup():
1625 def gengroup():
1625 '''yield a sequence of changegroup chunks (strings)'''
1626 '''yield a sequence of changegroup chunks (strings)'''
1626 # construct a list of all changed files
1627 # construct a list of all changed files
1627
1628
1628 for chunk in cl.group(nodes, bundler):
1629 for chunk in cl.group(nodes, bundler):
1629 yield chunk
1630 yield chunk
1630 self.ui.progress(_('bundling'), None)
1631 self.ui.progress(_('bundling'), None)
1631
1632
1632 count[0] = 0
1633 count[0] = 0
1633 for chunk in mf.group(gennodelst(mf), bundler):
1634 for chunk in mf.group(gennodelst(mf), bundler):
1634 yield chunk
1635 yield chunk
1635 self.ui.progress(_('bundling'), None)
1636 self.ui.progress(_('bundling'), None)
1636
1637
1637 count[0] = 0
1638 count[0] = 0
1638 for fname in sorted(changedfiles):
1639 for fname in sorted(changedfiles):
1639 filerevlog = self.file(fname)
1640 filerevlog = self.file(fname)
1640 if not len(filerevlog):
1641 if not len(filerevlog):
1641 raise util.Abort(_("empty or missing revlog for %s") % fname)
1642 raise util.Abort(_("empty or missing revlog for %s") % fname)
1642 fstate[0] = fname
1643 fstate[0] = fname
1643 first = True
1644 first = True
1644 for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
1645 for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
1645 if first:
1646 if first:
1646 if chunk == bundler.close():
1647 if chunk == bundler.close():
1647 break
1648 break
1648 count[0] += 1
1649 count[0] += 1
1649 yield bundler.fileheader(fname)
1650 yield bundler.fileheader(fname)
1650 first = False
1651 first = False
1651 yield chunk
1652 yield chunk
1652 yield bundler.close()
1653 yield bundler.close()
1653 self.ui.progress(_('bundling'), None)
1654 self.ui.progress(_('bundling'), None)
1654
1655
1655 if nodes:
1656 if nodes:
1656 self.hook('outgoing', node=hex(nodes[0]), source=source)
1657 self.hook('outgoing', node=hex(nodes[0]), source=source)
1657
1658
1658 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1659 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1659
1660
1660 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1661 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1661 """Add the changegroup returned by source.read() to this repo.
1662 """Add the changegroup returned by source.read() to this repo.
1662 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1663 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1663 the URL of the repo where this changegroup is coming from.
1664 the URL of the repo where this changegroup is coming from.
1664 If lock is not None, the function takes ownership of the lock
1665 If lock is not None, the function takes ownership of the lock
1665 and releases it after the changegroup is added.
1666 and releases it after the changegroup is added.
1666
1667
1667 Return an integer summarizing the change to this repo:
1668 Return an integer summarizing the change to this repo:
1668 - nothing changed or no source: 0
1669 - nothing changed or no source: 0
1669 - more heads than before: 1+added heads (2..n)
1670 - more heads than before: 1+added heads (2..n)
1670 - fewer heads than before: -1-removed heads (-2..-n)
1671 - fewer heads than before: -1-removed heads (-2..-n)
1671 - number of heads stays the same: 1
1672 - number of heads stays the same: 1
1672 """
1673 """
1673 def csmap(x):
1674 def csmap(x):
1674 self.ui.debug("add changeset %s\n" % short(x))
1675 self.ui.debug("add changeset %s\n" % short(x))
1675 return len(cl)
1676 return len(cl)
1676
1677
1677 def revmap(x):
1678 def revmap(x):
1678 return cl.rev(x)
1679 return cl.rev(x)
1679
1680
1680 if not source:
1681 if not source:
1681 return 0
1682 return 0
1682
1683
1683 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1684 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1684
1685
1685 changesets = files = revisions = 0
1686 changesets = files = revisions = 0
1686 efiles = set()
1687 efiles = set()
1687
1688
1688 # write changelog data to temp files so concurrent readers will not see
1689 # write changelog data to temp files so concurrent readers will not see
1689 # inconsistent view
1690 # inconsistent view
1690 cl = self.changelog
1691 cl = self.changelog
1691 cl.delayupdate()
1692 cl.delayupdate()
1692 oldheads = len(cl.heads())
1693 oldheads = len(cl.heads())
1693
1694
1694 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1695 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1695 try:
1696 try:
1696 trp = weakref.proxy(tr)
1697 trp = weakref.proxy(tr)
1697 # pull off the changeset group
1698 # pull off the changeset group
1698 self.ui.status(_("adding changesets\n"))
1699 self.ui.status(_("adding changesets\n"))
1699 clstart = len(cl)
1700 clstart = len(cl)
1700 class prog(object):
1701 class prog(object):
1701 step = _('changesets')
1702 step = _('changesets')
1702 count = 1
1703 count = 1
1703 ui = self.ui
1704 ui = self.ui
1704 total = None
1705 total = None
1705 def __call__(self):
1706 def __call__(self):
1706 self.ui.progress(self.step, self.count, unit=_('chunks'),
1707 self.ui.progress(self.step, self.count, unit=_('chunks'),
1707 total=self.total)
1708 total=self.total)
1708 self.count += 1
1709 self.count += 1
1709 pr = prog()
1710 pr = prog()
1710 source.callback = pr
1711 source.callback = pr
1711
1712
1712 if (cl.addgroup(source, csmap, trp) is None
1713 if (cl.addgroup(source, csmap, trp) is None
1713 and not emptyok):
1714 and not emptyok):
1714 raise util.Abort(_("received changelog group is empty"))
1715 raise util.Abort(_("received changelog group is empty"))
1715 clend = len(cl)
1716 clend = len(cl)
1716 changesets = clend - clstart
1717 changesets = clend - clstart
1717 for c in xrange(clstart, clend):
1718 for c in xrange(clstart, clend):
1718 efiles.update(self[c].files())
1719 efiles.update(self[c].files())
1719 efiles = len(efiles)
1720 efiles = len(efiles)
1720 self.ui.progress(_('changesets'), None)
1721 self.ui.progress(_('changesets'), None)
1721
1722
1722 # pull off the manifest group
1723 # pull off the manifest group
1723 self.ui.status(_("adding manifests\n"))
1724 self.ui.status(_("adding manifests\n"))
1724 pr.step = _('manifests')
1725 pr.step = _('manifests')
1725 pr.count = 1
1726 pr.count = 1
1726 pr.total = changesets # manifests <= changesets
1727 pr.total = changesets # manifests <= changesets
1727 # no need to check for empty manifest group here:
1728 # no need to check for empty manifest group here:
1728 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1729 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1729 # no new manifest will be created and the manifest group will
1730 # no new manifest will be created and the manifest group will
1730 # be empty during the pull
1731 # be empty during the pull
1731 self.manifest.addgroup(source, revmap, trp)
1732 self.manifest.addgroup(source, revmap, trp)
1732 self.ui.progress(_('manifests'), None)
1733 self.ui.progress(_('manifests'), None)
1733
1734
1734 needfiles = {}
1735 needfiles = {}
1735 if self.ui.configbool('server', 'validate', default=False):
1736 if self.ui.configbool('server', 'validate', default=False):
1736 # validate incoming csets have their manifests
1737 # validate incoming csets have their manifests
1737 for cset in xrange(clstart, clend):
1738 for cset in xrange(clstart, clend):
1738 mfest = self.changelog.read(self.changelog.node(cset))[0]
1739 mfest = self.changelog.read(self.changelog.node(cset))[0]
1739 mfest = self.manifest.readdelta(mfest)
1740 mfest = self.manifest.readdelta(mfest)
1740 # store file nodes we must see
1741 # store file nodes we must see
1741 for f, n in mfest.iteritems():
1742 for f, n in mfest.iteritems():
1742 needfiles.setdefault(f, set()).add(n)
1743 needfiles.setdefault(f, set()).add(n)
1743
1744
1744 # process the files
1745 # process the files
1745 self.ui.status(_("adding file changes\n"))
1746 self.ui.status(_("adding file changes\n"))
1746 pr.step = 'files'
1747 pr.step = 'files'
1747 pr.count = 1
1748 pr.count = 1
1748 pr.total = efiles
1749 pr.total = efiles
1749 source.callback = None
1750 source.callback = None
1750
1751
1751 while 1:
1752 while 1:
1752 f = source.chunk()
1753 f = source.chunk()
1753 if not f:
1754 if not f:
1754 break
1755 break
1755 self.ui.debug("adding %s revisions\n" % f)
1756 self.ui.debug("adding %s revisions\n" % f)
1756 pr()
1757 pr()
1757 fl = self.file(f)
1758 fl = self.file(f)
1758 o = len(fl)
1759 o = len(fl)
1759 if fl.addgroup(source, revmap, trp) is None:
1760 if fl.addgroup(source, revmap, trp) is None:
1760 raise util.Abort(_("received file revlog group is empty"))
1761 raise util.Abort(_("received file revlog group is empty"))
1761 revisions += len(fl) - o
1762 revisions += len(fl) - o
1762 files += 1
1763 files += 1
1763 if f in needfiles:
1764 if f in needfiles:
1764 needs = needfiles[f]
1765 needs = needfiles[f]
1765 for new in xrange(o, len(fl)):
1766 for new in xrange(o, len(fl)):
1766 n = fl.node(new)
1767 n = fl.node(new)
1767 if n in needs:
1768 if n in needs:
1768 needs.remove(n)
1769 needs.remove(n)
1769 if not needs:
1770 if not needs:
1770 del needfiles[f]
1771 del needfiles[f]
1771 self.ui.progress(_('files'), None)
1772 self.ui.progress(_('files'), None)
1772
1773
1773 for f, needs in needfiles.iteritems():
1774 for f, needs in needfiles.iteritems():
1774 fl = self.file(f)
1775 fl = self.file(f)
1775 for n in needs:
1776 for n in needs:
1776 try:
1777 try:
1777 fl.rev(n)
1778 fl.rev(n)
1778 except error.LookupError:
1779 except error.LookupError:
1779 raise util.Abort(
1780 raise util.Abort(
1780 _('missing file data for %s:%s - run hg verify') %
1781 _('missing file data for %s:%s - run hg verify') %
1781 (f, hex(n)))
1782 (f, hex(n)))
1782
1783
1783 newheads = len(cl.heads())
1784 newheads = len(cl.heads())
1784 heads = ""
1785 heads = ""
1785 if oldheads and newheads != oldheads:
1786 if oldheads and newheads != oldheads:
1786 heads = _(" (%+d heads)") % (newheads - oldheads)
1787 heads = _(" (%+d heads)") % (newheads - oldheads)
1787
1788
1788 self.ui.status(_("added %d changesets"
1789 self.ui.status(_("added %d changesets"
1789 " with %d changes to %d files%s\n")
1790 " with %d changes to %d files%s\n")
1790 % (changesets, revisions, files, heads))
1791 % (changesets, revisions, files, heads))
1791
1792
1792 if changesets > 0:
1793 if changesets > 0:
1793 p = lambda: cl.writepending() and self.root or ""
1794 p = lambda: cl.writepending() and self.root or ""
1794 self.hook('pretxnchangegroup', throw=True,
1795 self.hook('pretxnchangegroup', throw=True,
1795 node=hex(cl.node(clstart)), source=srctype,
1796 node=hex(cl.node(clstart)), source=srctype,
1796 url=url, pending=p)
1797 url=url, pending=p)
1797
1798
1798 # make changelog see real files again
1799 # make changelog see real files again
1799 cl.finalize(trp)
1800 cl.finalize(trp)
1800
1801
1801 tr.close()
1802 tr.close()
1802 finally:
1803 finally:
1803 tr.release()
1804 tr.release()
1804 if lock:
1805 if lock:
1805 lock.release()
1806 lock.release()
1806
1807
1807 if changesets > 0:
1808 if changesets > 0:
1808 # forcefully update the on-disk branch cache
1809 # forcefully update the on-disk branch cache
1809 self.ui.debug("updating the branch cache\n")
1810 self.ui.debug("updating the branch cache\n")
1810 self.updatebranchcache()
1811 self.updatebranchcache()
1811 self.hook("changegroup", node=hex(cl.node(clstart)),
1812 self.hook("changegroup", node=hex(cl.node(clstart)),
1812 source=srctype, url=url)
1813 source=srctype, url=url)
1813
1814
1814 for i in xrange(clstart, clend):
1815 for i in xrange(clstart, clend):
1815 self.hook("incoming", node=hex(cl.node(i)),
1816 self.hook("incoming", node=hex(cl.node(i)),
1816 source=srctype, url=url)
1817 source=srctype, url=url)
1817
1818
1818 # never return 0 here:
1819 # never return 0 here:
1819 if newheads < oldheads:
1820 if newheads < oldheads:
1820 return newheads - oldheads - 1
1821 return newheads - oldheads - 1
1821 else:
1822 else:
1822 return newheads - oldheads + 1
1823 return newheads - oldheads + 1
1823
1824
1824
1825
1825 def stream_in(self, remote, requirements):
1826 def stream_in(self, remote, requirements):
1826 lock = self.lock()
1827 lock = self.lock()
1827 try:
1828 try:
1828 fp = remote.stream_out()
1829 fp = remote.stream_out()
1829 l = fp.readline()
1830 l = fp.readline()
1830 try:
1831 try:
1831 resp = int(l)
1832 resp = int(l)
1832 except ValueError:
1833 except ValueError:
1833 raise error.ResponseError(
1834 raise error.ResponseError(
1834 _('Unexpected response from remote server:'), l)
1835 _('Unexpected response from remote server:'), l)
1835 if resp == 1:
1836 if resp == 1:
1836 raise util.Abort(_('operation forbidden by server'))
1837 raise util.Abort(_('operation forbidden by server'))
1837 elif resp == 2:
1838 elif resp == 2:
1838 raise util.Abort(_('locking the remote repository failed'))
1839 raise util.Abort(_('locking the remote repository failed'))
1839 elif resp != 0:
1840 elif resp != 0:
1840 raise util.Abort(_('the server sent an unknown error code'))
1841 raise util.Abort(_('the server sent an unknown error code'))
1841 self.ui.status(_('streaming all changes\n'))
1842 self.ui.status(_('streaming all changes\n'))
1842 l = fp.readline()
1843 l = fp.readline()
1843 try:
1844 try:
1844 total_files, total_bytes = map(int, l.split(' ', 1))
1845 total_files, total_bytes = map(int, l.split(' ', 1))
1845 except (ValueError, TypeError):
1846 except (ValueError, TypeError):
1846 raise error.ResponseError(
1847 raise error.ResponseError(
1847 _('Unexpected response from remote server:'), l)
1848 _('Unexpected response from remote server:'), l)
1848 self.ui.status(_('%d files to transfer, %s of data\n') %
1849 self.ui.status(_('%d files to transfer, %s of data\n') %
1849 (total_files, util.bytecount(total_bytes)))
1850 (total_files, util.bytecount(total_bytes)))
1850 start = time.time()
1851 start = time.time()
1851 for i in xrange(total_files):
1852 for i in xrange(total_files):
1852 # XXX doesn't support '\n' or '\r' in filenames
1853 # XXX doesn't support '\n' or '\r' in filenames
1853 l = fp.readline()
1854 l = fp.readline()
1854 try:
1855 try:
1855 name, size = l.split('\0', 1)
1856 name, size = l.split('\0', 1)
1856 size = int(size)
1857 size = int(size)
1857 except (ValueError, TypeError):
1858 except (ValueError, TypeError):
1858 raise error.ResponseError(
1859 raise error.ResponseError(
1859 _('Unexpected response from remote server:'), l)
1860 _('Unexpected response from remote server:'), l)
1860 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1861 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1861 # for backwards compat, name was partially encoded
1862 # for backwards compat, name was partially encoded
1862 ofp = self.sopener(store.decodedir(name), 'w')
1863 ofp = self.sopener(store.decodedir(name), 'w')
1863 for chunk in util.filechunkiter(fp, limit=size):
1864 for chunk in util.filechunkiter(fp, limit=size):
1864 ofp.write(chunk)
1865 ofp.write(chunk)
1865 ofp.close()
1866 ofp.close()
1866 elapsed = time.time() - start
1867 elapsed = time.time() - start
1867 if elapsed <= 0:
1868 if elapsed <= 0:
1868 elapsed = 0.001
1869 elapsed = 0.001
1869 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1870 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1870 (util.bytecount(total_bytes), elapsed,
1871 (util.bytecount(total_bytes), elapsed,
1871 util.bytecount(total_bytes / elapsed)))
1872 util.bytecount(total_bytes / elapsed)))
1872
1873
1873 # new requirements = old non-format requirements + new format-related
1874 # new requirements = old non-format requirements + new format-related
1874 # requirements from the streamed-in repository
1875 # requirements from the streamed-in repository
1875 requirements.update(set(self.requirements) - self.supportedformats)
1876 requirements.update(set(self.requirements) - self.supportedformats)
1876 self._applyrequirements(requirements)
1877 self._applyrequirements(requirements)
1877 self._writerequirements()
1878 self._writerequirements()
1878
1879
1879 self.invalidate()
1880 self.invalidate()
1880 return len(self.heads()) + 1
1881 return len(self.heads()) + 1
1881 finally:
1882 finally:
1882 lock.release()
1883 lock.release()
1883
1884
1884 def clone(self, remote, heads=[], stream=False):
1885 def clone(self, remote, heads=[], stream=False):
1885 '''clone remote repository.
1886 '''clone remote repository.
1886
1887
1887 keyword arguments:
1888 keyword arguments:
1888 heads: list of revs to clone (forces use of pull)
1889 heads: list of revs to clone (forces use of pull)
1889 stream: use streaming clone if possible'''
1890 stream: use streaming clone if possible'''
1890
1891
1891 # now, all clients that can request uncompressed clones can
1892 # now, all clients that can request uncompressed clones can
1892 # read repo formats supported by all servers that can serve
1893 # read repo formats supported by all servers that can serve
1893 # them.
1894 # them.
1894
1895
1895 # if revlog format changes, client will have to check version
1896 # if revlog format changes, client will have to check version
1896 # and format flags on "stream" capability, and use
1897 # and format flags on "stream" capability, and use
1897 # uncompressed only if compatible.
1898 # uncompressed only if compatible.
1898
1899
1899 if stream and not heads:
1900 if stream and not heads:
1900 # 'stream' means remote revlog format is revlogv1 only
1901 # 'stream' means remote revlog format is revlogv1 only
1901 if remote.capable('stream'):
1902 if remote.capable('stream'):
1902 return self.stream_in(remote, set(('revlogv1',)))
1903 return self.stream_in(remote, set(('revlogv1',)))
1903 # otherwise, 'streamreqs' contains the remote revlog format
1904 # otherwise, 'streamreqs' contains the remote revlog format
1904 streamreqs = remote.capable('streamreqs')
1905 streamreqs = remote.capable('streamreqs')
1905 if streamreqs:
1906 if streamreqs:
1906 streamreqs = set(streamreqs.split(','))
1907 streamreqs = set(streamreqs.split(','))
1907 # if we support it, stream in and adjust our requirements
1908 # if we support it, stream in and adjust our requirements
1908 if not streamreqs - self.supportedformats:
1909 if not streamreqs - self.supportedformats:
1909 return self.stream_in(remote, streamreqs)
1910 return self.stream_in(remote, streamreqs)
1910 return self.pull(remote, heads)
1911 return self.pull(remote, heads)
1911
1912
1912 def pushkey(self, namespace, key, old, new):
1913 def pushkey(self, namespace, key, old, new):
1913 return pushkey.push(self, namespace, key, old, new)
1914 return pushkey.push(self, namespace, key, old, new)
1914
1915
1915 def listkeys(self, namespace):
1916 def listkeys(self, namespace):
1916 return pushkey.list(self, namespace)
1917 return pushkey.list(self, namespace)
1917
1918
1918 def debugwireargs(self, one, two, three=None, four=None):
1919 def debugwireargs(self, one, two, three=None, four=None):
1919 '''used to test argument passing over the wire'''
1920 '''used to test argument passing over the wire'''
1920 return "%s %s %s %s" % (one, two, three, four)
1921 return "%s %s %s %s" % (one, two, three, four)
1921
1922
1922 # used to avoid circular references so destructors work
1923 # used to avoid circular references so destructors work
1923 def aftertrans(files):
1924 def aftertrans(files):
1924 renamefiles = [tuple(t) for t in files]
1925 renamefiles = [tuple(t) for t in files]
1925 def a():
1926 def a():
1926 for src, dest in renamefiles:
1927 for src, dest in renamefiles:
1927 util.rename(src, dest)
1928 util.rename(src, dest)
1928 return a
1929 return a
1929
1930
1930 def instance(ui, path, create):
1931 def instance(ui, path, create):
1931 return localrepository(ui, urlmod.localpath(path), create)
1932 return localrepository(ui, urlmod.localpath(path), create)
1932
1933
1933 def islocal(path):
1934 def islocal(path):
1934 return True
1935 return True
@@ -1,166 +1,175 b''
1 $ echo "[extensions]" >> $HGRCPATH
1 $ echo "[extensions]" >> $HGRCPATH
2 $ echo "mq=" >> $HGRCPATH
2 $ echo "mq=" >> $HGRCPATH
3 $ echo "[mq]" >> $HGRCPATH
3 $ echo "[mq]" >> $HGRCPATH
4 $ echo "git=keep" >> $HGRCPATH
4 $ echo "git=keep" >> $HGRCPATH
5
5
6 $ hg init a
6 $ hg init a
7 $ cd a
7 $ cd a
8
8
9 $ echo 'base' > base
9 $ echo 'base' > base
10 $ hg ci -Ambase
10 $ hg ci -Ambase
11 adding base
11 adding base
12
12
13 $ hg qnew -mmqbase mqbase
13 $ hg qnew -mmqbase mqbase
14
14
15 $ echo 'patched' > base
15 $ echo 'patched' > base
16 $ hg qrefresh
16 $ hg qrefresh
17
17
18 qdiff:
18 qdiff:
19
19
20 $ hg qdiff
20 $ hg qdiff
21 diff -r d20a80d4def3 base
21 diff -r d20a80d4def3 base
22 --- a/base Thu Jan 01 00:00:00 1970 +0000
22 --- a/base Thu Jan 01 00:00:00 1970 +0000
23 +++ b/base* (glob)
23 +++ b/base* (glob)
24 @@ -1,1 +1,1 @@
24 @@ -1,1 +1,1 @@
25 -base
25 -base
26 +patched
26 +patched
27
27
28 qdiff dirname:
28 qdiff dirname:
29
29
30 $ hg qdiff --nodates .
30 $ hg qdiff --nodates .
31 diff -r d20a80d4def3 base
31 diff -r d20a80d4def3 base
32 --- a/base
32 --- a/base
33 +++ b/base
33 +++ b/base
34 @@ -1,1 +1,1 @@
34 @@ -1,1 +1,1 @@
35 -base
35 -base
36 +patched
36 +patched
37
37
38 qdiff filename:
38 qdiff filename:
39
39
40 $ hg qdiff --nodates base
40 $ hg qdiff --nodates base
41 diff -r d20a80d4def3 base
41 diff -r d20a80d4def3 base
42 --- a/base
42 --- a/base
43 +++ b/base
43 +++ b/base
44 @@ -1,1 +1,1 @@
44 @@ -1,1 +1,1 @@
45 -base
45 -base
46 +patched
46 +patched
47
47
48 $ hg revert -a
48 $ hg revert -a
49
49
50 $ hg qpop
50 $ hg qpop
51 popping mqbase
51 popping mqbase
52 patch queue now empty
52 patch queue now empty
53
53
54 $ hg qdelete mqbase
54 $ hg qdelete mqbase
55
55
56 $ printf '1\n2\n3\n4\nhello world\ngoodbye world\n7\n8\n9\n' > lines
56 $ printf '1\n2\n3\n4\nhello world\ngoodbye world\n7\n8\n9\n' > lines
57 $ hg ci -Amlines -d '2 0'
57 $ hg ci -Amlines -d '2 0'
58 adding lines
58 adding lines
59
59
60 $ hg qnew -mmqbase2 mqbase2
60 $ hg qnew -mmqbase2 mqbase2
61 $ printf '\n\n1\n2\n3\n4\nhello world\n goodbye world\n7\n8\n9\n' > lines
61 $ printf '\n\n1\n2\n3\n4\nhello world\n goodbye world\n7\n8\n9\n' > lines
62
62
63 $ hg qdiff --nodates -U 1
63 $ hg qdiff --nodates -U 1
64 diff -r b0c220e1cf43 lines
64 diff -r b0c220e1cf43 lines
65 --- a/lines
65 --- a/lines
66 +++ b/lines
66 +++ b/lines
67 @@ -1,1 +1,3 @@
67 @@ -1,1 +1,3 @@
68 +
68 +
69 +
69 +
70 1
70 1
71 @@ -4,4 +6,4 @@
71 @@ -4,4 +6,4 @@
72 4
72 4
73 -hello world
73 -hello world
74 -goodbye world
74 -goodbye world
75 +hello world
75 +hello world
76 + goodbye world
76 + goodbye world
77 7
77 7
78
78
79 $ hg qdiff --nodates -b
79 $ hg qdiff --nodates -b
80 diff -r b0c220e1cf43 lines
80 diff -r b0c220e1cf43 lines
81 --- a/lines
81 --- a/lines
82 +++ b/lines
82 +++ b/lines
83 @@ -1,9 +1,11 @@
83 @@ -1,9 +1,11 @@
84 +
84 +
85 +
85 +
86 1
86 1
87 2
87 2
88 3
88 3
89 4
89 4
90 hello world
90 hello world
91 -goodbye world
91 -goodbye world
92 + goodbye world
92 + goodbye world
93 7
93 7
94 8
94 8
95 9
95 9
96
96
97 $ hg qdiff --nodates -U 1 -B
97 $ hg qdiff --nodates -U 1 -B
98 diff -r b0c220e1cf43 lines
98 diff -r b0c220e1cf43 lines
99 --- a/lines
99 --- a/lines
100 +++ b/lines
100 +++ b/lines
101 @@ -4,4 +6,4 @@
101 @@ -4,4 +6,4 @@
102 4
102 4
103 -hello world
103 -hello world
104 -goodbye world
104 -goodbye world
105 +hello world
105 +hello world
106 + goodbye world
106 + goodbye world
107 7
107 7
108
108
109 $ hg qdiff --nodates -w
109 $ hg qdiff --nodates -w
110 diff -r b0c220e1cf43 lines
110 diff -r b0c220e1cf43 lines
111 --- a/lines
111 --- a/lines
112 +++ b/lines
112 +++ b/lines
113 @@ -1,3 +1,5 @@
113 @@ -1,3 +1,5 @@
114 +
114 +
115 +
115 +
116 1
116 1
117 2
117 2
118 3
118 3
119
119
120 $ hg qdiff --nodates --reverse
120 $ hg qdiff --nodates --reverse
121 diff -r b0c220e1cf43 lines
121 diff -r b0c220e1cf43 lines
122 --- a/lines
122 --- a/lines
123 +++ b/lines
123 +++ b/lines
124 @@ -1,11 +1,9 @@
124 @@ -1,11 +1,9 @@
125 -
125 -
126 -
126 -
127 1
127 1
128 2
128 2
129 3
129 3
130 4
130 4
131 -hello world
131 -hello world
132 - goodbye world
132 - goodbye world
133 +hello world
133 +hello world
134 +goodbye world
134 +goodbye world
135 7
135 7
136 8
136 8
137 9
137 9
138
138
139 qdiff preserve existing git flag:
139 qdiff preserve existing git flag:
140
140
141 $ hg qrefresh --git
141 $ hg qrefresh --git
142 $ echo a >> lines
142 $ echo a >> lines
143 $ hg qdiff
143 $ hg qdiff
144 diff --git a/lines b/lines
144 diff --git a/lines b/lines
145 --- a/lines
145 --- a/lines
146 +++ b/lines
146 +++ b/lines
147 @@ -1,9 +1,12 @@
147 @@ -1,9 +1,12 @@
148 +
148 +
149 +
149 +
150 1
150 1
151 2
151 2
152 3
152 3
153 4
153 4
154 -hello world
154 -hello world
155 -goodbye world
155 -goodbye world
156 +hello world
156 +hello world
157 + goodbye world
157 + goodbye world
158 7
158 7
159 8
159 8
160 9
160 9
161 +a
161 +a
162
162
163 $ hg qdiff --stat
163 $ hg qdiff --stat
164 lines | 7 +++++--
164 lines | 7 +++++--
165 1 files changed, 5 insertions(+), 2 deletions(-)
165 1 files changed, 5 insertions(+), 2 deletions(-)
166 $ hg qrefresh
166
167
168 qdiff when file deleted (but not removed) in working dir:
169
170 $ hg qnew deleted-file
171 $ echo a > newfile
172 $ hg add newfile
173 $ hg qrefresh
174 $ rm newfile
175 $ hg qdiff
General Comments 0
You need to be logged in to leave comments. Login now