##// END OF EJS Templates
subrepo: refuse to commit subrepos if .hgsub is excluded (issue2232)
Matt Mackall -
r11485:b602a95c stable
parent child Browse files
Show More
@@ -1,1869 +1,1874 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supported = set('revlogv1 store fncache shared'.split())
24 supported = set('revlogv1 store fncache shared'.split())
25
25
26 def __init__(self, baseui, path=None, create=0):
26 def __init__(self, baseui, path=None, create=0):
27 repo.repository.__init__(self)
27 repo.repository.__init__(self)
28 self.root = os.path.realpath(util.expandpath(path))
28 self.root = os.path.realpath(util.expandpath(path))
29 self.path = os.path.join(self.root, ".hg")
29 self.path = os.path.join(self.root, ".hg")
30 self.origroot = path
30 self.origroot = path
31 self.opener = util.opener(self.path)
31 self.opener = util.opener(self.path)
32 self.wopener = util.opener(self.root)
32 self.wopener = util.opener(self.root)
33 self.baseui = baseui
33 self.baseui = baseui
34 self.ui = baseui.copy()
34 self.ui = baseui.copy()
35
35
36 try:
36 try:
37 self.ui.readconfig(self.join("hgrc"), self.root)
37 self.ui.readconfig(self.join("hgrc"), self.root)
38 extensions.loadall(self.ui)
38 extensions.loadall(self.ui)
39 except IOError:
39 except IOError:
40 pass
40 pass
41
41
42 if not os.path.isdir(self.path):
42 if not os.path.isdir(self.path):
43 if create:
43 if create:
44 if not os.path.exists(path):
44 if not os.path.exists(path):
45 os.mkdir(path)
45 os.mkdir(path)
46 os.mkdir(self.path)
46 os.mkdir(self.path)
47 requirements = ["revlogv1"]
47 requirements = ["revlogv1"]
48 if self.ui.configbool('format', 'usestore', True):
48 if self.ui.configbool('format', 'usestore', True):
49 os.mkdir(os.path.join(self.path, "store"))
49 os.mkdir(os.path.join(self.path, "store"))
50 requirements.append("store")
50 requirements.append("store")
51 if self.ui.configbool('format', 'usefncache', True):
51 if self.ui.configbool('format', 'usefncache', True):
52 requirements.append("fncache")
52 requirements.append("fncache")
53 # create an invalid changelog
53 # create an invalid changelog
54 self.opener("00changelog.i", "a").write(
54 self.opener("00changelog.i", "a").write(
55 '\0\0\0\2' # represents revlogv2
55 '\0\0\0\2' # represents revlogv2
56 ' dummy changelog to prevent using the old repo layout'
56 ' dummy changelog to prevent using the old repo layout'
57 )
57 )
58 reqfile = self.opener("requires", "w")
58 reqfile = self.opener("requires", "w")
59 for r in requirements:
59 for r in requirements:
60 reqfile.write("%s\n" % r)
60 reqfile.write("%s\n" % r)
61 reqfile.close()
61 reqfile.close()
62 else:
62 else:
63 raise error.RepoError(_("repository %s not found") % path)
63 raise error.RepoError(_("repository %s not found") % path)
64 elif create:
64 elif create:
65 raise error.RepoError(_("repository %s already exists") % path)
65 raise error.RepoError(_("repository %s already exists") % path)
66 else:
66 else:
67 # find requirements
67 # find requirements
68 requirements = set()
68 requirements = set()
69 try:
69 try:
70 requirements = set(self.opener("requires").read().splitlines())
70 requirements = set(self.opener("requires").read().splitlines())
71 except IOError, inst:
71 except IOError, inst:
72 if inst.errno != errno.ENOENT:
72 if inst.errno != errno.ENOENT:
73 raise
73 raise
74 for r in requirements - self.supported:
74 for r in requirements - self.supported:
75 raise error.RepoError(_("requirement '%s' not supported") % r)
75 raise error.RepoError(_("requirement '%s' not supported") % r)
76
76
77 self.sharedpath = self.path
77 self.sharedpath = self.path
78 try:
78 try:
79 s = os.path.realpath(self.opener("sharedpath").read())
79 s = os.path.realpath(self.opener("sharedpath").read())
80 if not os.path.exists(s):
80 if not os.path.exists(s):
81 raise error.RepoError(
81 raise error.RepoError(
82 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 _('.hg/sharedpath points to nonexistent directory %s') % s)
83 self.sharedpath = s
83 self.sharedpath = s
84 except IOError, inst:
84 except IOError, inst:
85 if inst.errno != errno.ENOENT:
85 if inst.errno != errno.ENOENT:
86 raise
86 raise
87
87
88 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.store = store.store(requirements, self.sharedpath, util.opener)
89 self.spath = self.store.path
89 self.spath = self.store.path
90 self.sopener = self.store.opener
90 self.sopener = self.store.opener
91 self.sjoin = self.store.join
91 self.sjoin = self.store.join
92 self.opener.createmode = self.store.createmode
92 self.opener.createmode = self.store.createmode
93 self.sopener.options = {}
93 self.sopener.options = {}
94
94
95 # These two define the set of tags for this repository. _tags
95 # These two define the set of tags for this repository. _tags
96 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 # maps tag name to node; _tagtypes maps tag name to 'global' or
97 # 'local'. (Global tags are defined by .hgtags across all
97 # 'local'. (Global tags are defined by .hgtags across all
98 # heads, and local tags are defined in .hg/localtags.) They
98 # heads, and local tags are defined in .hg/localtags.) They
99 # constitute the in-memory cache of tags.
99 # constitute the in-memory cache of tags.
100 self._tags = None
100 self._tags = None
101 self._tagtypes = None
101 self._tagtypes = None
102
102
103 self._branchcache = None # in UTF-8
103 self._branchcache = None # in UTF-8
104 self._branchcachetip = None
104 self._branchcachetip = None
105 self.nodetagscache = None
105 self.nodetagscache = None
106 self.filterpats = {}
106 self.filterpats = {}
107 self._datafilters = {}
107 self._datafilters = {}
108 self._transref = self._lockref = self._wlockref = None
108 self._transref = self._lockref = self._wlockref = None
109
109
110 @propertycache
110 @propertycache
111 def changelog(self):
111 def changelog(self):
112 c = changelog.changelog(self.sopener)
112 c = changelog.changelog(self.sopener)
113 if 'HG_PENDING' in os.environ:
113 if 'HG_PENDING' in os.environ:
114 p = os.environ['HG_PENDING']
114 p = os.environ['HG_PENDING']
115 if p.startswith(self.root):
115 if p.startswith(self.root):
116 c.readpending('00changelog.i.a')
116 c.readpending('00changelog.i.a')
117 self.sopener.options['defversion'] = c.version
117 self.sopener.options['defversion'] = c.version
118 return c
118 return c
119
119
120 @propertycache
120 @propertycache
121 def manifest(self):
121 def manifest(self):
122 return manifest.manifest(self.sopener)
122 return manifest.manifest(self.sopener)
123
123
124 @propertycache
124 @propertycache
125 def dirstate(self):
125 def dirstate(self):
126 return dirstate.dirstate(self.opener, self.ui, self.root)
126 return dirstate.dirstate(self.opener, self.ui, self.root)
127
127
128 def __getitem__(self, changeid):
128 def __getitem__(self, changeid):
129 if changeid is None:
129 if changeid is None:
130 return context.workingctx(self)
130 return context.workingctx(self)
131 return context.changectx(self, changeid)
131 return context.changectx(self, changeid)
132
132
133 def __contains__(self, changeid):
133 def __contains__(self, changeid):
134 try:
134 try:
135 return bool(self.lookup(changeid))
135 return bool(self.lookup(changeid))
136 except error.RepoLookupError:
136 except error.RepoLookupError:
137 return False
137 return False
138
138
139 def __nonzero__(self):
139 def __nonzero__(self):
140 return True
140 return True
141
141
142 def __len__(self):
142 def __len__(self):
143 return len(self.changelog)
143 return len(self.changelog)
144
144
145 def __iter__(self):
145 def __iter__(self):
146 for i in xrange(len(self)):
146 for i in xrange(len(self)):
147 yield i
147 yield i
148
148
149 def url(self):
149 def url(self):
150 return 'file:' + self.root
150 return 'file:' + self.root
151
151
152 def hook(self, name, throw=False, **args):
152 def hook(self, name, throw=False, **args):
153 return hook.hook(self.ui, self, name, throw, **args)
153 return hook.hook(self.ui, self, name, throw, **args)
154
154
155 tag_disallowed = ':\r\n'
155 tag_disallowed = ':\r\n'
156
156
157 def _tag(self, names, node, message, local, user, date, extra={}):
157 def _tag(self, names, node, message, local, user, date, extra={}):
158 if isinstance(names, str):
158 if isinstance(names, str):
159 allchars = names
159 allchars = names
160 names = (names,)
160 names = (names,)
161 else:
161 else:
162 allchars = ''.join(names)
162 allchars = ''.join(names)
163 for c in self.tag_disallowed:
163 for c in self.tag_disallowed:
164 if c in allchars:
164 if c in allchars:
165 raise util.Abort(_('%r cannot be used in a tag name') % c)
165 raise util.Abort(_('%r cannot be used in a tag name') % c)
166
166
167 branches = self.branchmap()
167 branches = self.branchmap()
168 for name in names:
168 for name in names:
169 self.hook('pretag', throw=True, node=hex(node), tag=name,
169 self.hook('pretag', throw=True, node=hex(node), tag=name,
170 local=local)
170 local=local)
171 if name in branches:
171 if name in branches:
172 self.ui.warn(_("warning: tag %s conflicts with existing"
172 self.ui.warn(_("warning: tag %s conflicts with existing"
173 " branch name\n") % name)
173 " branch name\n") % name)
174
174
175 def writetags(fp, names, munge, prevtags):
175 def writetags(fp, names, munge, prevtags):
176 fp.seek(0, 2)
176 fp.seek(0, 2)
177 if prevtags and prevtags[-1] != '\n':
177 if prevtags and prevtags[-1] != '\n':
178 fp.write('\n')
178 fp.write('\n')
179 for name in names:
179 for name in names:
180 m = munge and munge(name) or name
180 m = munge and munge(name) or name
181 if self._tagtypes and name in self._tagtypes:
181 if self._tagtypes and name in self._tagtypes:
182 old = self._tags.get(name, nullid)
182 old = self._tags.get(name, nullid)
183 fp.write('%s %s\n' % (hex(old), m))
183 fp.write('%s %s\n' % (hex(old), m))
184 fp.write('%s %s\n' % (hex(node), m))
184 fp.write('%s %s\n' % (hex(node), m))
185 fp.close()
185 fp.close()
186
186
187 prevtags = ''
187 prevtags = ''
188 if local:
188 if local:
189 try:
189 try:
190 fp = self.opener('localtags', 'r+')
190 fp = self.opener('localtags', 'r+')
191 except IOError:
191 except IOError:
192 fp = self.opener('localtags', 'a')
192 fp = self.opener('localtags', 'a')
193 else:
193 else:
194 prevtags = fp.read()
194 prevtags = fp.read()
195
195
196 # local tags are stored in the current charset
196 # local tags are stored in the current charset
197 writetags(fp, names, None, prevtags)
197 writetags(fp, names, None, prevtags)
198 for name in names:
198 for name in names:
199 self.hook('tag', node=hex(node), tag=name, local=local)
199 self.hook('tag', node=hex(node), tag=name, local=local)
200 return
200 return
201
201
202 try:
202 try:
203 fp = self.wfile('.hgtags', 'rb+')
203 fp = self.wfile('.hgtags', 'rb+')
204 except IOError:
204 except IOError:
205 fp = self.wfile('.hgtags', 'ab')
205 fp = self.wfile('.hgtags', 'ab')
206 else:
206 else:
207 prevtags = fp.read()
207 prevtags = fp.read()
208
208
209 # committed tags are stored in UTF-8
209 # committed tags are stored in UTF-8
210 writetags(fp, names, encoding.fromlocal, prevtags)
210 writetags(fp, names, encoding.fromlocal, prevtags)
211
211
212 if '.hgtags' not in self.dirstate:
212 if '.hgtags' not in self.dirstate:
213 self[None].add(['.hgtags'])
213 self[None].add(['.hgtags'])
214
214
215 m = matchmod.exact(self.root, '', ['.hgtags'])
215 m = matchmod.exact(self.root, '', ['.hgtags'])
216 tagnode = self.commit(message, user, date, extra=extra, match=m)
216 tagnode = self.commit(message, user, date, extra=extra, match=m)
217
217
218 for name in names:
218 for name in names:
219 self.hook('tag', node=hex(node), tag=name, local=local)
219 self.hook('tag', node=hex(node), tag=name, local=local)
220
220
221 return tagnode
221 return tagnode
222
222
223 def tag(self, names, node, message, local, user, date):
223 def tag(self, names, node, message, local, user, date):
224 '''tag a revision with one or more symbolic names.
224 '''tag a revision with one or more symbolic names.
225
225
226 names is a list of strings or, when adding a single tag, names may be a
226 names is a list of strings or, when adding a single tag, names may be a
227 string.
227 string.
228
228
229 if local is True, the tags are stored in a per-repository file.
229 if local is True, the tags are stored in a per-repository file.
230 otherwise, they are stored in the .hgtags file, and a new
230 otherwise, they are stored in the .hgtags file, and a new
231 changeset is committed with the change.
231 changeset is committed with the change.
232
232
233 keyword arguments:
233 keyword arguments:
234
234
235 local: whether to store tags in non-version-controlled file
235 local: whether to store tags in non-version-controlled file
236 (default False)
236 (default False)
237
237
238 message: commit message to use if committing
238 message: commit message to use if committing
239
239
240 user: name of user to use if committing
240 user: name of user to use if committing
241
241
242 date: date tuple to use if committing'''
242 date: date tuple to use if committing'''
243
243
244 for x in self.status()[:5]:
244 for x in self.status()[:5]:
245 if '.hgtags' in x:
245 if '.hgtags' in x:
246 raise util.Abort(_('working copy of .hgtags is changed '
246 raise util.Abort(_('working copy of .hgtags is changed '
247 '(please commit .hgtags manually)'))
247 '(please commit .hgtags manually)'))
248
248
249 self.tags() # instantiate the cache
249 self.tags() # instantiate the cache
250 self._tag(names, node, message, local, user, date)
250 self._tag(names, node, message, local, user, date)
251
251
252 def tags(self):
252 def tags(self):
253 '''return a mapping of tag to node'''
253 '''return a mapping of tag to node'''
254 if self._tags is None:
254 if self._tags is None:
255 (self._tags, self._tagtypes) = self._findtags()
255 (self._tags, self._tagtypes) = self._findtags()
256
256
257 return self._tags
257 return self._tags
258
258
259 def _findtags(self):
259 def _findtags(self):
260 '''Do the hard work of finding tags. Return a pair of dicts
260 '''Do the hard work of finding tags. Return a pair of dicts
261 (tags, tagtypes) where tags maps tag name to node, and tagtypes
261 (tags, tagtypes) where tags maps tag name to node, and tagtypes
262 maps tag name to a string like \'global\' or \'local\'.
262 maps tag name to a string like \'global\' or \'local\'.
263 Subclasses or extensions are free to add their own tags, but
263 Subclasses or extensions are free to add their own tags, but
264 should be aware that the returned dicts will be retained for the
264 should be aware that the returned dicts will be retained for the
265 duration of the localrepo object.'''
265 duration of the localrepo object.'''
266
266
267 # XXX what tagtype should subclasses/extensions use? Currently
267 # XXX what tagtype should subclasses/extensions use? Currently
268 # mq and bookmarks add tags, but do not set the tagtype at all.
268 # mq and bookmarks add tags, but do not set the tagtype at all.
269 # Should each extension invent its own tag type? Should there
269 # Should each extension invent its own tag type? Should there
270 # be one tagtype for all such "virtual" tags? Or is the status
270 # be one tagtype for all such "virtual" tags? Or is the status
271 # quo fine?
271 # quo fine?
272
272
273 alltags = {} # map tag name to (node, hist)
273 alltags = {} # map tag name to (node, hist)
274 tagtypes = {}
274 tagtypes = {}
275
275
276 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
276 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
277 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
277 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
278
278
279 # Build the return dicts. Have to re-encode tag names because
279 # Build the return dicts. Have to re-encode tag names because
280 # the tags module always uses UTF-8 (in order not to lose info
280 # the tags module always uses UTF-8 (in order not to lose info
281 # writing to the cache), but the rest of Mercurial wants them in
281 # writing to the cache), but the rest of Mercurial wants them in
282 # local encoding.
282 # local encoding.
283 tags = {}
283 tags = {}
284 for (name, (node, hist)) in alltags.iteritems():
284 for (name, (node, hist)) in alltags.iteritems():
285 if node != nullid:
285 if node != nullid:
286 tags[encoding.tolocal(name)] = node
286 tags[encoding.tolocal(name)] = node
287 tags['tip'] = self.changelog.tip()
287 tags['tip'] = self.changelog.tip()
288 tagtypes = dict([(encoding.tolocal(name), value)
288 tagtypes = dict([(encoding.tolocal(name), value)
289 for (name, value) in tagtypes.iteritems()])
289 for (name, value) in tagtypes.iteritems()])
290 return (tags, tagtypes)
290 return (tags, tagtypes)
291
291
292 def tagtype(self, tagname):
292 def tagtype(self, tagname):
293 '''
293 '''
294 return the type of the given tag. result can be:
294 return the type of the given tag. result can be:
295
295
296 'local' : a local tag
296 'local' : a local tag
297 'global' : a global tag
297 'global' : a global tag
298 None : tag does not exist
298 None : tag does not exist
299 '''
299 '''
300
300
301 self.tags()
301 self.tags()
302
302
303 return self._tagtypes.get(tagname)
303 return self._tagtypes.get(tagname)
304
304
305 def tagslist(self):
305 def tagslist(self):
306 '''return a list of tags ordered by revision'''
306 '''return a list of tags ordered by revision'''
307 l = []
307 l = []
308 for t, n in self.tags().iteritems():
308 for t, n in self.tags().iteritems():
309 try:
309 try:
310 r = self.changelog.rev(n)
310 r = self.changelog.rev(n)
311 except:
311 except:
312 r = -2 # sort to the beginning of the list if unknown
312 r = -2 # sort to the beginning of the list if unknown
313 l.append((r, t, n))
313 l.append((r, t, n))
314 return [(t, n) for r, t, n in sorted(l)]
314 return [(t, n) for r, t, n in sorted(l)]
315
315
316 def nodetags(self, node):
316 def nodetags(self, node):
317 '''return the tags associated with a node'''
317 '''return the tags associated with a node'''
318 if not self.nodetagscache:
318 if not self.nodetagscache:
319 self.nodetagscache = {}
319 self.nodetagscache = {}
320 for t, n in self.tags().iteritems():
320 for t, n in self.tags().iteritems():
321 self.nodetagscache.setdefault(n, []).append(t)
321 self.nodetagscache.setdefault(n, []).append(t)
322 for tags in self.nodetagscache.itervalues():
322 for tags in self.nodetagscache.itervalues():
323 tags.sort()
323 tags.sort()
324 return self.nodetagscache.get(node, [])
324 return self.nodetagscache.get(node, [])
325
325
326 def _branchtags(self, partial, lrev):
326 def _branchtags(self, partial, lrev):
327 # TODO: rename this function?
327 # TODO: rename this function?
328 tiprev = len(self) - 1
328 tiprev = len(self) - 1
329 if lrev != tiprev:
329 if lrev != tiprev:
330 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
330 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
331 self._updatebranchcache(partial, ctxgen)
331 self._updatebranchcache(partial, ctxgen)
332 self._writebranchcache(partial, self.changelog.tip(), tiprev)
332 self._writebranchcache(partial, self.changelog.tip(), tiprev)
333
333
334 return partial
334 return partial
335
335
336 def branchmap(self):
336 def branchmap(self):
337 '''returns a dictionary {branch: [branchheads]}'''
337 '''returns a dictionary {branch: [branchheads]}'''
338 tip = self.changelog.tip()
338 tip = self.changelog.tip()
339 if self._branchcache is not None and self._branchcachetip == tip:
339 if self._branchcache is not None and self._branchcachetip == tip:
340 return self._branchcache
340 return self._branchcache
341
341
342 oldtip = self._branchcachetip
342 oldtip = self._branchcachetip
343 self._branchcachetip = tip
343 self._branchcachetip = tip
344 if oldtip is None or oldtip not in self.changelog.nodemap:
344 if oldtip is None or oldtip not in self.changelog.nodemap:
345 partial, last, lrev = self._readbranchcache()
345 partial, last, lrev = self._readbranchcache()
346 else:
346 else:
347 lrev = self.changelog.rev(oldtip)
347 lrev = self.changelog.rev(oldtip)
348 partial = self._branchcache
348 partial = self._branchcache
349
349
350 self._branchtags(partial, lrev)
350 self._branchtags(partial, lrev)
351 # this private cache holds all heads (not just tips)
351 # this private cache holds all heads (not just tips)
352 self._branchcache = partial
352 self._branchcache = partial
353
353
354 return self._branchcache
354 return self._branchcache
355
355
356 def branchtags(self):
356 def branchtags(self):
357 '''return a dict where branch names map to the tipmost head of
357 '''return a dict where branch names map to the tipmost head of
358 the branch, open heads come before closed'''
358 the branch, open heads come before closed'''
359 bt = {}
359 bt = {}
360 for bn, heads in self.branchmap().iteritems():
360 for bn, heads in self.branchmap().iteritems():
361 tip = heads[-1]
361 tip = heads[-1]
362 for h in reversed(heads):
362 for h in reversed(heads):
363 if 'close' not in self.changelog.read(h)[5]:
363 if 'close' not in self.changelog.read(h)[5]:
364 tip = h
364 tip = h
365 break
365 break
366 bt[bn] = tip
366 bt[bn] = tip
367 return bt
367 return bt
368
368
369
369
370 def _readbranchcache(self):
370 def _readbranchcache(self):
371 partial = {}
371 partial = {}
372 try:
372 try:
373 f = self.opener("branchheads.cache")
373 f = self.opener("branchheads.cache")
374 lines = f.read().split('\n')
374 lines = f.read().split('\n')
375 f.close()
375 f.close()
376 except (IOError, OSError):
376 except (IOError, OSError):
377 return {}, nullid, nullrev
377 return {}, nullid, nullrev
378
378
379 try:
379 try:
380 last, lrev = lines.pop(0).split(" ", 1)
380 last, lrev = lines.pop(0).split(" ", 1)
381 last, lrev = bin(last), int(lrev)
381 last, lrev = bin(last), int(lrev)
382 if lrev >= len(self) or self[lrev].node() != last:
382 if lrev >= len(self) or self[lrev].node() != last:
383 # invalidate the cache
383 # invalidate the cache
384 raise ValueError('invalidating branch cache (tip differs)')
384 raise ValueError('invalidating branch cache (tip differs)')
385 for l in lines:
385 for l in lines:
386 if not l:
386 if not l:
387 continue
387 continue
388 node, label = l.split(" ", 1)
388 node, label = l.split(" ", 1)
389 partial.setdefault(label.strip(), []).append(bin(node))
389 partial.setdefault(label.strip(), []).append(bin(node))
390 except KeyboardInterrupt:
390 except KeyboardInterrupt:
391 raise
391 raise
392 except Exception, inst:
392 except Exception, inst:
393 if self.ui.debugflag:
393 if self.ui.debugflag:
394 self.ui.warn(str(inst), '\n')
394 self.ui.warn(str(inst), '\n')
395 partial, last, lrev = {}, nullid, nullrev
395 partial, last, lrev = {}, nullid, nullrev
396 return partial, last, lrev
396 return partial, last, lrev
397
397
398 def _writebranchcache(self, branches, tip, tiprev):
398 def _writebranchcache(self, branches, tip, tiprev):
399 try:
399 try:
400 f = self.opener("branchheads.cache", "w", atomictemp=True)
400 f = self.opener("branchheads.cache", "w", atomictemp=True)
401 f.write("%s %s\n" % (hex(tip), tiprev))
401 f.write("%s %s\n" % (hex(tip), tiprev))
402 for label, nodes in branches.iteritems():
402 for label, nodes in branches.iteritems():
403 for node in nodes:
403 for node in nodes:
404 f.write("%s %s\n" % (hex(node), label))
404 f.write("%s %s\n" % (hex(node), label))
405 f.rename()
405 f.rename()
406 except (IOError, OSError):
406 except (IOError, OSError):
407 pass
407 pass
408
408
409 def _updatebranchcache(self, partial, ctxgen):
409 def _updatebranchcache(self, partial, ctxgen):
410 # collect new branch entries
410 # collect new branch entries
411 newbranches = {}
411 newbranches = {}
412 for c in ctxgen:
412 for c in ctxgen:
413 newbranches.setdefault(c.branch(), []).append(c.node())
413 newbranches.setdefault(c.branch(), []).append(c.node())
414 # if older branchheads are reachable from new ones, they aren't
414 # if older branchheads are reachable from new ones, they aren't
415 # really branchheads. Note checking parents is insufficient:
415 # really branchheads. Note checking parents is insufficient:
416 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
416 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
417 for branch, newnodes in newbranches.iteritems():
417 for branch, newnodes in newbranches.iteritems():
418 bheads = partial.setdefault(branch, [])
418 bheads = partial.setdefault(branch, [])
419 bheads.extend(newnodes)
419 bheads.extend(newnodes)
420 if len(bheads) <= 1:
420 if len(bheads) <= 1:
421 continue
421 continue
422 # starting from tip means fewer passes over reachable
422 # starting from tip means fewer passes over reachable
423 while newnodes:
423 while newnodes:
424 latest = newnodes.pop()
424 latest = newnodes.pop()
425 if latest not in bheads:
425 if latest not in bheads:
426 continue
426 continue
427 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
427 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
428 reachable = self.changelog.reachable(latest, minbhrev)
428 reachable = self.changelog.reachable(latest, minbhrev)
429 reachable.remove(latest)
429 reachable.remove(latest)
430 bheads = [b for b in bheads if b not in reachable]
430 bheads = [b for b in bheads if b not in reachable]
431 partial[branch] = bheads
431 partial[branch] = bheads
432
432
433 def lookup(self, key):
433 def lookup(self, key):
434 if isinstance(key, int):
434 if isinstance(key, int):
435 return self.changelog.node(key)
435 return self.changelog.node(key)
436 elif key == '.':
436 elif key == '.':
437 return self.dirstate.parents()[0]
437 return self.dirstate.parents()[0]
438 elif key == 'null':
438 elif key == 'null':
439 return nullid
439 return nullid
440 elif key == 'tip':
440 elif key == 'tip':
441 return self.changelog.tip()
441 return self.changelog.tip()
442 n = self.changelog._match(key)
442 n = self.changelog._match(key)
443 if n:
443 if n:
444 return n
444 return n
445 if key in self.tags():
445 if key in self.tags():
446 return self.tags()[key]
446 return self.tags()[key]
447 if key in self.branchtags():
447 if key in self.branchtags():
448 return self.branchtags()[key]
448 return self.branchtags()[key]
449 n = self.changelog._partialmatch(key)
449 n = self.changelog._partialmatch(key)
450 if n:
450 if n:
451 return n
451 return n
452
452
453 # can't find key, check if it might have come from damaged dirstate
453 # can't find key, check if it might have come from damaged dirstate
454 if key in self.dirstate.parents():
454 if key in self.dirstate.parents():
455 raise error.Abort(_("working directory has unknown parent '%s'!")
455 raise error.Abort(_("working directory has unknown parent '%s'!")
456 % short(key))
456 % short(key))
457 try:
457 try:
458 if len(key) == 20:
458 if len(key) == 20:
459 key = hex(key)
459 key = hex(key)
460 except:
460 except:
461 pass
461 pass
462 raise error.RepoLookupError(_("unknown revision '%s'") % key)
462 raise error.RepoLookupError(_("unknown revision '%s'") % key)
463
463
464 def lookupbranch(self, key, remote=None):
464 def lookupbranch(self, key, remote=None):
465 repo = remote or self
465 repo = remote or self
466 if key in repo.branchmap():
466 if key in repo.branchmap():
467 return key
467 return key
468
468
469 repo = (remote and remote.local()) and remote or self
469 repo = (remote and remote.local()) and remote or self
470 return repo[key].branch()
470 return repo[key].branch()
471
471
472 def local(self):
472 def local(self):
473 return True
473 return True
474
474
475 def join(self, f):
475 def join(self, f):
476 return os.path.join(self.path, f)
476 return os.path.join(self.path, f)
477
477
478 def wjoin(self, f):
478 def wjoin(self, f):
479 return os.path.join(self.root, f)
479 return os.path.join(self.root, f)
480
480
481 def rjoin(self, f):
481 def rjoin(self, f):
482 return os.path.join(self.root, util.pconvert(f))
482 return os.path.join(self.root, util.pconvert(f))
483
483
484 def file(self, f):
484 def file(self, f):
485 if f[0] == '/':
485 if f[0] == '/':
486 f = f[1:]
486 f = f[1:]
487 return filelog.filelog(self.sopener, f)
487 return filelog.filelog(self.sopener, f)
488
488
489 def changectx(self, changeid):
489 def changectx(self, changeid):
490 return self[changeid]
490 return self[changeid]
491
491
492 def parents(self, changeid=None):
492 def parents(self, changeid=None):
493 '''get list of changectxs for parents of changeid'''
493 '''get list of changectxs for parents of changeid'''
494 return self[changeid].parents()
494 return self[changeid].parents()
495
495
496 def filectx(self, path, changeid=None, fileid=None):
496 def filectx(self, path, changeid=None, fileid=None):
497 """changeid can be a changeset revision, node, or tag.
497 """changeid can be a changeset revision, node, or tag.
498 fileid can be a file revision or node."""
498 fileid can be a file revision or node."""
499 return context.filectx(self, path, changeid, fileid)
499 return context.filectx(self, path, changeid, fileid)
500
500
501 def getcwd(self):
501 def getcwd(self):
502 return self.dirstate.getcwd()
502 return self.dirstate.getcwd()
503
503
504 def pathto(self, f, cwd=None):
504 def pathto(self, f, cwd=None):
505 return self.dirstate.pathto(f, cwd)
505 return self.dirstate.pathto(f, cwd)
506
506
507 def wfile(self, f, mode='r'):
507 def wfile(self, f, mode='r'):
508 return self.wopener(f, mode)
508 return self.wopener(f, mode)
509
509
510 def _link(self, f):
510 def _link(self, f):
511 return os.path.islink(self.wjoin(f))
511 return os.path.islink(self.wjoin(f))
512
512
513 def _filter(self, filter, filename, data):
513 def _filter(self, filter, filename, data):
514 if filter not in self.filterpats:
514 if filter not in self.filterpats:
515 l = []
515 l = []
516 for pat, cmd in self.ui.configitems(filter):
516 for pat, cmd in self.ui.configitems(filter):
517 if cmd == '!':
517 if cmd == '!':
518 continue
518 continue
519 mf = matchmod.match(self.root, '', [pat])
519 mf = matchmod.match(self.root, '', [pat])
520 fn = None
520 fn = None
521 params = cmd
521 params = cmd
522 for name, filterfn in self._datafilters.iteritems():
522 for name, filterfn in self._datafilters.iteritems():
523 if cmd.startswith(name):
523 if cmd.startswith(name):
524 fn = filterfn
524 fn = filterfn
525 params = cmd[len(name):].lstrip()
525 params = cmd[len(name):].lstrip()
526 break
526 break
527 if not fn:
527 if not fn:
528 fn = lambda s, c, **kwargs: util.filter(s, c)
528 fn = lambda s, c, **kwargs: util.filter(s, c)
529 # Wrap old filters not supporting keyword arguments
529 # Wrap old filters not supporting keyword arguments
530 if not inspect.getargspec(fn)[2]:
530 if not inspect.getargspec(fn)[2]:
531 oldfn = fn
531 oldfn = fn
532 fn = lambda s, c, **kwargs: oldfn(s, c)
532 fn = lambda s, c, **kwargs: oldfn(s, c)
533 l.append((mf, fn, params))
533 l.append((mf, fn, params))
534 self.filterpats[filter] = l
534 self.filterpats[filter] = l
535
535
536 for mf, fn, cmd in self.filterpats[filter]:
536 for mf, fn, cmd in self.filterpats[filter]:
537 if mf(filename):
537 if mf(filename):
538 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
538 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
539 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
539 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
540 break
540 break
541
541
542 return data
542 return data
543
543
544 def adddatafilter(self, name, filter):
544 def adddatafilter(self, name, filter):
545 self._datafilters[name] = filter
545 self._datafilters[name] = filter
546
546
547 def wread(self, filename):
547 def wread(self, filename):
548 if self._link(filename):
548 if self._link(filename):
549 data = os.readlink(self.wjoin(filename))
549 data = os.readlink(self.wjoin(filename))
550 else:
550 else:
551 data = self.wopener(filename, 'r').read()
551 data = self.wopener(filename, 'r').read()
552 return self._filter("encode", filename, data)
552 return self._filter("encode", filename, data)
553
553
554 def wwrite(self, filename, data, flags):
554 def wwrite(self, filename, data, flags):
555 data = self._filter("decode", filename, data)
555 data = self._filter("decode", filename, data)
556 try:
556 try:
557 os.unlink(self.wjoin(filename))
557 os.unlink(self.wjoin(filename))
558 except OSError:
558 except OSError:
559 pass
559 pass
560 if 'l' in flags:
560 if 'l' in flags:
561 self.wopener.symlink(data, filename)
561 self.wopener.symlink(data, filename)
562 else:
562 else:
563 self.wopener(filename, 'w').write(data)
563 self.wopener(filename, 'w').write(data)
564 if 'x' in flags:
564 if 'x' in flags:
565 util.set_flags(self.wjoin(filename), False, True)
565 util.set_flags(self.wjoin(filename), False, True)
566
566
567 def wwritedata(self, filename, data):
567 def wwritedata(self, filename, data):
568 return self._filter("decode", filename, data)
568 return self._filter("decode", filename, data)
569
569
    def transaction(self, desc):
        """Open a transaction on the store, or nest into a running one.

        desc is a short description written to journal.desc so a later
        rollback can report what it is undoing.  Returns the transaction;
        only a weak reference is kept on the repository.
        """
        # nest into an already-running transaction instead of starting anew
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. a freshly created repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        # journal.desc records the pre-transaction length and description
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # on close, the journal.* files are renamed to undo.*, which is
        # what makes a later rollback possible
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
599
599
600 def recover(self):
600 def recover(self):
601 lock = self.lock()
601 lock = self.lock()
602 try:
602 try:
603 if os.path.exists(self.sjoin("journal")):
603 if os.path.exists(self.sjoin("journal")):
604 self.ui.status(_("rolling back interrupted transaction\n"))
604 self.ui.status(_("rolling back interrupted transaction\n"))
605 transaction.rollback(self.sopener, self.sjoin("journal"),
605 transaction.rollback(self.sopener, self.sjoin("journal"),
606 self.ui.warn)
606 self.ui.warn)
607 self.invalidate()
607 self.invalidate()
608 return True
608 return True
609 else:
609 else:
610 self.ui.warn(_("no interrupted transaction available\n"))
610 self.ui.warn(_("no interrupted transaction available\n"))
611 return False
611 return False
612 finally:
612 finally:
613 lock.release()
613 lock.release()
614
614
    def rollback(self, dryrun=False):
        """Undo the last transaction using the undo.* files.

        With dryrun=True only the description of what would be undone is
        printed.  Returns 1 (and warns) when there is nothing to roll
        back; returns None otherwise.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc holds: pre-transaction length, command, args
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                               int(args[0]) - 1, args[1])
                    # NOTE(review): if undo.desc has fewer than 2 lines,
                    # desc stays unbound and the status() call below would
                    # raise NameError — TODO confirm whether that can occur
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    # restore the branch the working directory was on
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                # drop cached state that may reference rolled-back revisions
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
653
653
654 def invalidatecaches(self):
654 def invalidatecaches(self):
655 self._tags = None
655 self._tags = None
656 self._tagtypes = None
656 self._tagtypes = None
657 self.nodetagscache = None
657 self.nodetagscache = None
658 self._branchcache = None # in UTF-8
658 self._branchcache = None # in UTF-8
659 self._branchcachetip = None
659 self._branchcachetip = None
660
660
661 def invalidate(self):
661 def invalidate(self):
662 for a in "changelog manifest".split():
662 for a in "changelog manifest".split():
663 if a in self.__dict__:
663 if a in self.__dict__:
664 delattr(self, a)
664 delattr(self, a)
665 self.invalidatecaches()
665 self.invalidatecaches()
666
666
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file at lockname and return the lock object.

        A non-blocking attempt is made first; if the lock is held and
        wait is true, a second attempt blocks for the configured
        ui.timeout (default 600 seconds).  releasefn/acquirefn are hooks
        run on release/acquire; desc names the lock in warnings.
        """
        try:
            # first try without blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
681
681
682 def lock(self, wait=True):
682 def lock(self, wait=True):
683 '''Lock the repository store (.hg/store) and return a weak reference
683 '''Lock the repository store (.hg/store) and return a weak reference
684 to the lock. Use this before modifying the store (e.g. committing or
684 to the lock. Use this before modifying the store (e.g. committing or
685 stripping). If you are opening a transaction, get a lock as well.)'''
685 stripping). If you are opening a transaction, get a lock as well.)'''
686 l = self._lockref and self._lockref()
686 l = self._lockref and self._lockref()
687 if l is not None and l.held:
687 if l is not None and l.held:
688 l.lock()
688 l.lock()
689 return l
689 return l
690
690
691 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
691 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
692 _('repository %s') % self.origroot)
692 _('repository %s') % self.origroot)
693 self._lockref = weakref.ref(l)
693 self._lockref = weakref.ref(l)
694 return l
694 return l
695
695
696 def wlock(self, wait=True):
696 def wlock(self, wait=True):
697 '''Lock the non-store parts of the repository (everything under
697 '''Lock the non-store parts of the repository (everything under
698 .hg except .hg/store) and return a weak reference to the lock.
698 .hg except .hg/store) and return a weak reference to the lock.
699 Use this before modifying files in .hg.'''
699 Use this before modifying files in .hg.'''
700 l = self._wlockref and self._wlockref()
700 l = self._wlockref and self._wlockref()
701 if l is not None and l.held:
701 if l is not None and l.held:
702 l.lock()
702 l.lock()
703 return l
703 return l
704
704
705 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
705 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
706 self.dirstate.invalidate, _('working directory of %s') %
706 self.dirstate.invalidate, _('working directory of %s') %
707 self.origroot)
707 self.origroot)
708 self._wlockref = weakref.ref(l)
708 self._wlockref = weakref.ref(l)
709 return l
709 return l
710
710
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        parents' manifests; linkrev is the changelog revision the new
        filelog entry will link back to; tr is the active transaction.
        The filename is appended to changelist when the file actually
        changed.  Returns the new filelog node, or the first-parent node
        when nothing needs to be stored.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
785
785
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit.  Raises util.Abort on invalid requests (partial
        merge commit, unresolved conflicts, unmatched patterns, or a
        modified-but-excluded .hgsub).
        """
        # NOTE(review): extra={} is a shared mutable default; it is only
        # read here (extra.get), but callers should not rely on mutating it

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories and abort on bad (unmatched) files
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                # subrepos present in a parent but filtered out below are
                # candidates for removal
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                # is hgsub modified and not included? (issue2232: refuse to
                # commit subrepos when .hgsub itself is excluded)
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort("can't commit subrepos without .hgsub")
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed and not closing or branching: no commit
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in subs:
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.relpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # deliberate bare except: point at the saved message, then
                # re-raise whatever interrupted the commit
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
909
914
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When error is true, IOErrors while reading a file are fatal;
        otherwise a missing file (ENOENT) is treated as removed.
        Returns the new changelog node.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # file vanished from the working dir: treat as removed
                        removed.append(f)

            # update manifest
            m1.update(new)
            # only report files actually present in one of the parents
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # writepending tells hooks where pending data can be found
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
982
978 def destroyed(self):
983 def destroyed(self):
979 '''Inform the repository that nodes have been destroyed.
984 '''Inform the repository that nodes have been destroyed.
980 Intended for use by strip and rollback, so there's a common
985 Intended for use by strip and rollback, so there's a common
981 place for anything that has to be done after destroying history.'''
986 place for anything that has to be done after destroying history.'''
982 # XXX it might be nice if we could take the list of destroyed
987 # XXX it might be nice if we could take the list of destroyed
983 # nodes, but I don't see an easy way for rollback() to do that
988 # nodes, but I don't see an easy way for rollback() to do that
984
989
985 # Ensure the persistent tag cache is updated. Doing it now
990 # Ensure the persistent tag cache is updated. Doing it now
986 # means that the tag cache only has to worry about destroyed
991 # means that the tag cache only has to worry about destroyed
987 # heads immediately after a strip/rollback. That in turn
992 # heads immediately after a strip/rollback. That in turn
988 # guarantees that "cachetip == currenttip" (comparing both rev
993 # guarantees that "cachetip == currenttip" (comparing both rev
989 # and node) always means no nodes have been added or destroyed.
994 # and node) always means no nodes have been added or destroyed.
990
995
991 # XXX this is suboptimal when qrefresh'ing: we strip the current
996 # XXX this is suboptimal when qrefresh'ing: we strip the current
992 # head, refresh the tag cache, then immediately add a new head.
997 # head, refresh the tag cache, then immediately add a new head.
993 # But I think doing it this way is necessary for the "instant
998 # But I think doing it this way is necessary for the "instant
994 # tag cache retrieval" case to work.
999 # tag cache retrieval" case to work.
995 self.invalidatecaches()
1000 self.invalidatecaches()
996
1001
997 def walk(self, match, node=None):
1002 def walk(self, match, node=None):
998 '''
1003 '''
999 walk recursively through the directory tree or a given
1004 walk recursively through the directory tree or a given
1000 changeset, finding all files matched by the match
1005 changeset, finding all files matched by the match
1001 function
1006 function
1002 '''
1007 '''
1003 return self[node].walk(match)
1008 return self[node].walk(match)
1004
1009
1005 def status(self, node1='.', node2=None, match=None,
1010 def status(self, node1='.', node2=None, match=None,
1006 ignored=False, clean=False, unknown=False):
1011 ignored=False, clean=False, unknown=False):
1007 """return status of files between two nodes or node and working directory
1012 """return status of files between two nodes or node and working directory
1008
1013
1009 If node1 is None, use the first dirstate parent instead.
1014 If node1 is None, use the first dirstate parent instead.
1010 If node2 is None, compare node1 with working directory.
1015 If node2 is None, compare node1 with working directory.
1011 """
1016 """
1012
1017
1013 def mfmatches(ctx):
1018 def mfmatches(ctx):
1014 mf = ctx.manifest().copy()
1019 mf = ctx.manifest().copy()
1015 for fn in mf.keys():
1020 for fn in mf.keys():
1016 if not match(fn):
1021 if not match(fn):
1017 del mf[fn]
1022 del mf[fn]
1018 return mf
1023 return mf
1019
1024
1020 if isinstance(node1, context.changectx):
1025 if isinstance(node1, context.changectx):
1021 ctx1 = node1
1026 ctx1 = node1
1022 else:
1027 else:
1023 ctx1 = self[node1]
1028 ctx1 = self[node1]
1024 if isinstance(node2, context.changectx):
1029 if isinstance(node2, context.changectx):
1025 ctx2 = node2
1030 ctx2 = node2
1026 else:
1031 else:
1027 ctx2 = self[node2]
1032 ctx2 = self[node2]
1028
1033
1029 working = ctx2.rev() is None
1034 working = ctx2.rev() is None
1030 parentworking = working and ctx1 == self['.']
1035 parentworking = working and ctx1 == self['.']
1031 match = match or matchmod.always(self.root, self.getcwd())
1036 match = match or matchmod.always(self.root, self.getcwd())
1032 listignored, listclean, listunknown = ignored, clean, unknown
1037 listignored, listclean, listunknown = ignored, clean, unknown
1033
1038
1034 # load earliest manifest first for caching reasons
1039 # load earliest manifest first for caching reasons
1035 if not working and ctx2.rev() < ctx1.rev():
1040 if not working and ctx2.rev() < ctx1.rev():
1036 ctx2.manifest()
1041 ctx2.manifest()
1037
1042
1038 if not parentworking:
1043 if not parentworking:
1039 def bad(f, msg):
1044 def bad(f, msg):
1040 if f not in ctx1:
1045 if f not in ctx1:
1041 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1046 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1042 match.bad = bad
1047 match.bad = bad
1043
1048
1044 if working: # we need to scan the working dir
1049 if working: # we need to scan the working dir
1045 subrepos = []
1050 subrepos = []
1046 if '.hgsub' in self.dirstate:
1051 if '.hgsub' in self.dirstate:
1047 subrepos = ctx1.substate.keys()
1052 subrepos = ctx1.substate.keys()
1048 s = self.dirstate.status(match, subrepos, listignored,
1053 s = self.dirstate.status(match, subrepos, listignored,
1049 listclean, listunknown)
1054 listclean, listunknown)
1050 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1055 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1051
1056
1052 # check for any possibly clean files
1057 # check for any possibly clean files
1053 if parentworking and cmp:
1058 if parentworking and cmp:
1054 fixup = []
1059 fixup = []
1055 # do a full compare of any files that might have changed
1060 # do a full compare of any files that might have changed
1056 for f in sorted(cmp):
1061 for f in sorted(cmp):
1057 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1062 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1058 or ctx1[f].cmp(ctx2[f].data())):
1063 or ctx1[f].cmp(ctx2[f].data())):
1059 modified.append(f)
1064 modified.append(f)
1060 else:
1065 else:
1061 fixup.append(f)
1066 fixup.append(f)
1062
1067
1063 if listclean:
1068 if listclean:
1064 clean += fixup
1069 clean += fixup
1065
1070
1066 # update dirstate for files that are actually clean
1071 # update dirstate for files that are actually clean
1067 if fixup:
1072 if fixup:
1068 try:
1073 try:
1069 # updating the dirstate is optional
1074 # updating the dirstate is optional
1070 # so we don't wait on the lock
1075 # so we don't wait on the lock
1071 wlock = self.wlock(False)
1076 wlock = self.wlock(False)
1072 try:
1077 try:
1073 for f in fixup:
1078 for f in fixup:
1074 self.dirstate.normal(f)
1079 self.dirstate.normal(f)
1075 finally:
1080 finally:
1076 wlock.release()
1081 wlock.release()
1077 except error.LockError:
1082 except error.LockError:
1078 pass
1083 pass
1079
1084
1080 if not parentworking:
1085 if not parentworking:
1081 mf1 = mfmatches(ctx1)
1086 mf1 = mfmatches(ctx1)
1082 if working:
1087 if working:
1083 # we are comparing working dir against non-parent
1088 # we are comparing working dir against non-parent
1084 # generate a pseudo-manifest for the working dir
1089 # generate a pseudo-manifest for the working dir
1085 mf2 = mfmatches(self['.'])
1090 mf2 = mfmatches(self['.'])
1086 for f in cmp + modified + added:
1091 for f in cmp + modified + added:
1087 mf2[f] = None
1092 mf2[f] = None
1088 mf2.set(f, ctx2.flags(f))
1093 mf2.set(f, ctx2.flags(f))
1089 for f in removed:
1094 for f in removed:
1090 if f in mf2:
1095 if f in mf2:
1091 del mf2[f]
1096 del mf2[f]
1092 else:
1097 else:
1093 # we are comparing two revisions
1098 # we are comparing two revisions
1094 deleted, unknown, ignored = [], [], []
1099 deleted, unknown, ignored = [], [], []
1095 mf2 = mfmatches(ctx2)
1100 mf2 = mfmatches(ctx2)
1096
1101
1097 modified, added, clean = [], [], []
1102 modified, added, clean = [], [], []
1098 for fn in mf2:
1103 for fn in mf2:
1099 if fn in mf1:
1104 if fn in mf1:
1100 if (mf1.flags(fn) != mf2.flags(fn) or
1105 if (mf1.flags(fn) != mf2.flags(fn) or
1101 (mf1[fn] != mf2[fn] and
1106 (mf1[fn] != mf2[fn] and
1102 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1107 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1103 modified.append(fn)
1108 modified.append(fn)
1104 elif listclean:
1109 elif listclean:
1105 clean.append(fn)
1110 clean.append(fn)
1106 del mf1[fn]
1111 del mf1[fn]
1107 else:
1112 else:
1108 added.append(fn)
1113 added.append(fn)
1109 removed = mf1.keys()
1114 removed = mf1.keys()
1110
1115
1111 r = modified, added, removed, deleted, unknown, ignored, clean
1116 r = modified, added, removed, deleted, unknown, ignored, clean
1112 [l.sort() for l in r]
1117 [l.sort() for l in r]
1113 return r
1118 return r
1114
1119
1115 def heads(self, start=None):
1120 def heads(self, start=None):
1116 heads = self.changelog.heads(start)
1121 heads = self.changelog.heads(start)
1117 # sort the output in rev descending order
1122 # sort the output in rev descending order
1118 heads = [(-self.changelog.rev(h), h) for h in heads]
1123 heads = [(-self.changelog.rev(h), h) for h in heads]
1119 return [n for (r, n) in sorted(heads)]
1124 return [n for (r, n) in sorted(heads)]
1120
1125
1121 def branchheads(self, branch=None, start=None, closed=False):
1126 def branchheads(self, branch=None, start=None, closed=False):
1122 '''return a (possibly filtered) list of heads for the given branch
1127 '''return a (possibly filtered) list of heads for the given branch
1123
1128
1124 Heads are returned in topological order, from newest to oldest.
1129 Heads are returned in topological order, from newest to oldest.
1125 If branch is None, use the dirstate branch.
1130 If branch is None, use the dirstate branch.
1126 If start is not None, return only heads reachable from start.
1131 If start is not None, return only heads reachable from start.
1127 If closed is True, return heads that are marked as closed as well.
1132 If closed is True, return heads that are marked as closed as well.
1128 '''
1133 '''
1129 if branch is None:
1134 if branch is None:
1130 branch = self[None].branch()
1135 branch = self[None].branch()
1131 branches = self.branchmap()
1136 branches = self.branchmap()
1132 if branch not in branches:
1137 if branch not in branches:
1133 return []
1138 return []
1134 # the cache returns heads ordered lowest to highest
1139 # the cache returns heads ordered lowest to highest
1135 bheads = list(reversed(branches[branch]))
1140 bheads = list(reversed(branches[branch]))
1136 if start is not None:
1141 if start is not None:
1137 # filter out the heads that cannot be reached from startrev
1142 # filter out the heads that cannot be reached from startrev
1138 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1143 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1139 bheads = [h for h in bheads if h in fbheads]
1144 bheads = [h for h in bheads if h in fbheads]
1140 if not closed:
1145 if not closed:
1141 bheads = [h for h in bheads if
1146 bheads = [h for h in bheads if
1142 ('close' not in self.changelog.read(h)[5])]
1147 ('close' not in self.changelog.read(h)[5])]
1143 return bheads
1148 return bheads
1144
1149
1145 def branches(self, nodes):
1150 def branches(self, nodes):
1146 if not nodes:
1151 if not nodes:
1147 nodes = [self.changelog.tip()]
1152 nodes = [self.changelog.tip()]
1148 b = []
1153 b = []
1149 for n in nodes:
1154 for n in nodes:
1150 t = n
1155 t = n
1151 while 1:
1156 while 1:
1152 p = self.changelog.parents(n)
1157 p = self.changelog.parents(n)
1153 if p[1] != nullid or p[0] == nullid:
1158 if p[1] != nullid or p[0] == nullid:
1154 b.append((t, n, p[0], p[1]))
1159 b.append((t, n, p[0], p[1]))
1155 break
1160 break
1156 n = p[0]
1161 n = p[0]
1157 return b
1162 return b
1158
1163
1159 def between(self, pairs):
1164 def between(self, pairs):
1160 r = []
1165 r = []
1161
1166
1162 for top, bottom in pairs:
1167 for top, bottom in pairs:
1163 n, l, i = top, [], 0
1168 n, l, i = top, [], 0
1164 f = 1
1169 f = 1
1165
1170
1166 while n != bottom and n != nullid:
1171 while n != bottom and n != nullid:
1167 p = self.changelog.parents(n)[0]
1172 p = self.changelog.parents(n)[0]
1168 if i == f:
1173 if i == f:
1169 l.append(n)
1174 l.append(n)
1170 f = f * 2
1175 f = f * 2
1171 n = p
1176 n = p
1172 i += 1
1177 i += 1
1173
1178
1174 r.append(l)
1179 r.append(l)
1175
1180
1176 return r
1181 return r
1177
1182
1178 def pull(self, remote, heads=None, force=False):
1183 def pull(self, remote, heads=None, force=False):
1179 lock = self.lock()
1184 lock = self.lock()
1180 try:
1185 try:
1181 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1186 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1182 force=force)
1187 force=force)
1183 common, fetch, rheads = tmp
1188 common, fetch, rheads = tmp
1184 if not fetch:
1189 if not fetch:
1185 self.ui.status(_("no changes found\n"))
1190 self.ui.status(_("no changes found\n"))
1186 return 0
1191 return 0
1187
1192
1188 if fetch == [nullid]:
1193 if fetch == [nullid]:
1189 self.ui.status(_("requesting all changes\n"))
1194 self.ui.status(_("requesting all changes\n"))
1190 elif heads is None and remote.capable('changegroupsubset'):
1195 elif heads is None and remote.capable('changegroupsubset'):
1191 # issue1320, avoid a race if remote changed after discovery
1196 # issue1320, avoid a race if remote changed after discovery
1192 heads = rheads
1197 heads = rheads
1193
1198
1194 if heads is None:
1199 if heads is None:
1195 cg = remote.changegroup(fetch, 'pull')
1200 cg = remote.changegroup(fetch, 'pull')
1196 else:
1201 else:
1197 if not remote.capable('changegroupsubset'):
1202 if not remote.capable('changegroupsubset'):
1198 raise util.Abort(_("Partial pull cannot be done because "
1203 raise util.Abort(_("Partial pull cannot be done because "
1199 "other repository doesn't support "
1204 "other repository doesn't support "
1200 "changegroupsubset."))
1205 "changegroupsubset."))
1201 cg = remote.changegroupsubset(fetch, heads, 'pull')
1206 cg = remote.changegroupsubset(fetch, heads, 'pull')
1202 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1207 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1203 finally:
1208 finally:
1204 lock.release()
1209 lock.release()
1205
1210
1206 def push(self, remote, force=False, revs=None, newbranch=False):
1211 def push(self, remote, force=False, revs=None, newbranch=False):
1207 '''Push outgoing changesets (limited by revs) from the current
1212 '''Push outgoing changesets (limited by revs) from the current
1208 repository to remote. Return an integer:
1213 repository to remote. Return an integer:
1209 - 0 means HTTP error *or* nothing to push
1214 - 0 means HTTP error *or* nothing to push
1210 - 1 means we pushed and remote head count is unchanged *or*
1215 - 1 means we pushed and remote head count is unchanged *or*
1211 we have outgoing changesets but refused to push
1216 we have outgoing changesets but refused to push
1212 - other values as described by addchangegroup()
1217 - other values as described by addchangegroup()
1213 '''
1218 '''
1214 # there are two ways to push to remote repo:
1219 # there are two ways to push to remote repo:
1215 #
1220 #
1216 # addchangegroup assumes local user can lock remote
1221 # addchangegroup assumes local user can lock remote
1217 # repo (local filesystem, old ssh servers).
1222 # repo (local filesystem, old ssh servers).
1218 #
1223 #
1219 # unbundle assumes local user cannot lock remote repo (new ssh
1224 # unbundle assumes local user cannot lock remote repo (new ssh
1220 # servers, http servers).
1225 # servers, http servers).
1221
1226
1222 if remote.capable('unbundle'):
1227 if remote.capable('unbundle'):
1223 return self.push_unbundle(remote, force, revs, newbranch)
1228 return self.push_unbundle(remote, force, revs, newbranch)
1224 return self.push_addchangegroup(remote, force, revs, newbranch)
1229 return self.push_addchangegroup(remote, force, revs, newbranch)
1225
1230
1226 def push_addchangegroup(self, remote, force, revs, newbranch):
1231 def push_addchangegroup(self, remote, force, revs, newbranch):
1227 '''Push a changegroup by locking the remote and sending the
1232 '''Push a changegroup by locking the remote and sending the
1228 addchangegroup command to it. Used for local and old SSH repos.
1233 addchangegroup command to it. Used for local and old SSH repos.
1229 Return an integer: see push().
1234 Return an integer: see push().
1230 '''
1235 '''
1231 lock = remote.lock()
1236 lock = remote.lock()
1232 try:
1237 try:
1233 ret = discovery.prepush(self, remote, force, revs, newbranch)
1238 ret = discovery.prepush(self, remote, force, revs, newbranch)
1234 if ret[0] is not None:
1239 if ret[0] is not None:
1235 cg, remote_heads = ret
1240 cg, remote_heads = ret
1236 # we return an integer indicating remote head count change
1241 # we return an integer indicating remote head count change
1237 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1242 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1238 # and here we return 0 for "nothing to push" or 1 for
1243 # and here we return 0 for "nothing to push" or 1 for
1239 # "something to push but I refuse"
1244 # "something to push but I refuse"
1240 return ret[1]
1245 return ret[1]
1241 finally:
1246 finally:
1242 lock.release()
1247 lock.release()
1243
1248
1244 def push_unbundle(self, remote, force, revs, newbranch):
1249 def push_unbundle(self, remote, force, revs, newbranch):
1245 '''Push a changegroup by unbundling it on the remote. Used for new
1250 '''Push a changegroup by unbundling it on the remote. Used for new
1246 SSH and HTTP repos. Return an integer: see push().'''
1251 SSH and HTTP repos. Return an integer: see push().'''
1247 # local repo finds heads on server, finds out what revs it
1252 # local repo finds heads on server, finds out what revs it
1248 # must push. once revs transferred, if server finds it has
1253 # must push. once revs transferred, if server finds it has
1249 # different heads (someone else won commit/push race), server
1254 # different heads (someone else won commit/push race), server
1250 # aborts.
1255 # aborts.
1251
1256
1252 ret = discovery.prepush(self, remote, force, revs, newbranch)
1257 ret = discovery.prepush(self, remote, force, revs, newbranch)
1253 if ret[0] is not None:
1258 if ret[0] is not None:
1254 cg, remote_heads = ret
1259 cg, remote_heads = ret
1255 if force:
1260 if force:
1256 remote_heads = ['force']
1261 remote_heads = ['force']
1257 # ssh: return remote's addchangegroup()
1262 # ssh: return remote's addchangegroup()
1258 # http: return remote's addchangegroup() or 0 for error
1263 # http: return remote's addchangegroup() or 0 for error
1259 return remote.unbundle(cg, remote_heads, 'push')
1264 return remote.unbundle(cg, remote_heads, 'push')
1260 # as in push_addchangegroup()
1265 # as in push_addchangegroup()
1261 return ret[1]
1266 return ret[1]
1262
1267
1263 def changegroupinfo(self, nodes, source):
1268 def changegroupinfo(self, nodes, source):
1264 if self.ui.verbose or source == 'bundle':
1269 if self.ui.verbose or source == 'bundle':
1265 self.ui.status(_("%d changesets found\n") % len(nodes))
1270 self.ui.status(_("%d changesets found\n") % len(nodes))
1266 if self.ui.debugflag:
1271 if self.ui.debugflag:
1267 self.ui.debug("list of changesets:\n")
1272 self.ui.debug("list of changesets:\n")
1268 for node in nodes:
1273 for node in nodes:
1269 self.ui.debug("%s\n" % hex(node))
1274 self.ui.debug("%s\n" % hex(node))
1270
1275
1271 def changegroupsubset(self, bases, heads, source, extranodes=None):
1276 def changegroupsubset(self, bases, heads, source, extranodes=None):
1272 """Compute a changegroup consisting of all the nodes that are
1277 """Compute a changegroup consisting of all the nodes that are
1273 descendents of any of the bases and ancestors of any of the heads.
1278 descendents of any of the bases and ancestors of any of the heads.
1274 Return a chunkbuffer object whose read() method will return
1279 Return a chunkbuffer object whose read() method will return
1275 successive changegroup chunks.
1280 successive changegroup chunks.
1276
1281
1277 It is fairly complex as determining which filenodes and which
1282 It is fairly complex as determining which filenodes and which
1278 manifest nodes need to be included for the changeset to be complete
1283 manifest nodes need to be included for the changeset to be complete
1279 is non-trivial.
1284 is non-trivial.
1280
1285
1281 Another wrinkle is doing the reverse, figuring out which changeset in
1286 Another wrinkle is doing the reverse, figuring out which changeset in
1282 the changegroup a particular filenode or manifestnode belongs to.
1287 the changegroup a particular filenode or manifestnode belongs to.
1283
1288
1284 The caller can specify some nodes that must be included in the
1289 The caller can specify some nodes that must be included in the
1285 changegroup using the extranodes argument. It should be a dict
1290 changegroup using the extranodes argument. It should be a dict
1286 where the keys are the filenames (or 1 for the manifest), and the
1291 where the keys are the filenames (or 1 for the manifest), and the
1287 values are lists of (node, linknode) tuples, where node is a wanted
1292 values are lists of (node, linknode) tuples, where node is a wanted
1288 node and linknode is the changelog node that should be transmitted as
1293 node and linknode is the changelog node that should be transmitted as
1289 the linkrev.
1294 the linkrev.
1290 """
1295 """
1291
1296
1292 # Set up some initial variables
1297 # Set up some initial variables
1293 # Make it easy to refer to self.changelog
1298 # Make it easy to refer to self.changelog
1294 cl = self.changelog
1299 cl = self.changelog
1295 # msng is short for missing - compute the list of changesets in this
1300 # msng is short for missing - compute the list of changesets in this
1296 # changegroup.
1301 # changegroup.
1297 if not bases:
1302 if not bases:
1298 bases = [nullid]
1303 bases = [nullid]
1299 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1304 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1300
1305
1301 if extranodes is None:
1306 if extranodes is None:
1302 # can we go through the fast path ?
1307 # can we go through the fast path ?
1303 heads.sort()
1308 heads.sort()
1304 allheads = self.heads()
1309 allheads = self.heads()
1305 allheads.sort()
1310 allheads.sort()
1306 if heads == allheads:
1311 if heads == allheads:
1307 return self._changegroup(msng_cl_lst, source)
1312 return self._changegroup(msng_cl_lst, source)
1308
1313
1309 # slow path
1314 # slow path
1310 self.hook('preoutgoing', throw=True, source=source)
1315 self.hook('preoutgoing', throw=True, source=source)
1311
1316
1312 self.changegroupinfo(msng_cl_lst, source)
1317 self.changegroupinfo(msng_cl_lst, source)
1313 # Some bases may turn out to be superfluous, and some heads may be
1318 # Some bases may turn out to be superfluous, and some heads may be
1314 # too. nodesbetween will return the minimal set of bases and heads
1319 # too. nodesbetween will return the minimal set of bases and heads
1315 # necessary to re-create the changegroup.
1320 # necessary to re-create the changegroup.
1316
1321
1317 # Known heads are the list of heads that it is assumed the recipient
1322 # Known heads are the list of heads that it is assumed the recipient
1318 # of this changegroup will know about.
1323 # of this changegroup will know about.
1319 knownheads = set()
1324 knownheads = set()
1320 # We assume that all parents of bases are known heads.
1325 # We assume that all parents of bases are known heads.
1321 for n in bases:
1326 for n in bases:
1322 knownheads.update(cl.parents(n))
1327 knownheads.update(cl.parents(n))
1323 knownheads.discard(nullid)
1328 knownheads.discard(nullid)
1324 knownheads = list(knownheads)
1329 knownheads = list(knownheads)
1325 if knownheads:
1330 if knownheads:
1326 # Now that we know what heads are known, we can compute which
1331 # Now that we know what heads are known, we can compute which
1327 # changesets are known. The recipient must know about all
1332 # changesets are known. The recipient must know about all
1328 # changesets required to reach the known heads from the null
1333 # changesets required to reach the known heads from the null
1329 # changeset.
1334 # changeset.
1330 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1335 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1331 junk = None
1336 junk = None
1332 # Transform the list into a set.
1337 # Transform the list into a set.
1333 has_cl_set = set(has_cl_set)
1338 has_cl_set = set(has_cl_set)
1334 else:
1339 else:
1335 # If there were no known heads, the recipient cannot be assumed to
1340 # If there were no known heads, the recipient cannot be assumed to
1336 # know about any changesets.
1341 # know about any changesets.
1337 has_cl_set = set()
1342 has_cl_set = set()
1338
1343
1339 # Make it easy to refer to self.manifest
1344 # Make it easy to refer to self.manifest
1340 mnfst = self.manifest
1345 mnfst = self.manifest
1341 # We don't know which manifests are missing yet
1346 # We don't know which manifests are missing yet
1342 msng_mnfst_set = {}
1347 msng_mnfst_set = {}
1343 # Nor do we know which filenodes are missing.
1348 # Nor do we know which filenodes are missing.
1344 msng_filenode_set = {}
1349 msng_filenode_set = {}
1345
1350
1346 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1351 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1347 junk = None
1352 junk = None
1348
1353
1349 # A changeset always belongs to itself, so the changenode lookup
1354 # A changeset always belongs to itself, so the changenode lookup
1350 # function for a changenode is identity.
1355 # function for a changenode is identity.
1351 def identity(x):
1356 def identity(x):
1352 return x
1357 return x
1353
1358
1354 # If we determine that a particular file or manifest node must be a
1359 # If we determine that a particular file or manifest node must be a
1355 # node that the recipient of the changegroup will already have, we can
1360 # node that the recipient of the changegroup will already have, we can
1356 # also assume the recipient will have all the parents. This function
1361 # also assume the recipient will have all the parents. This function
1357 # prunes them from the set of missing nodes.
1362 # prunes them from the set of missing nodes.
1358 def prune_parents(revlog, hasset, msngset):
1363 def prune_parents(revlog, hasset, msngset):
1359 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1364 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1360 msngset.pop(revlog.node(r), None)
1365 msngset.pop(revlog.node(r), None)
1361
1366
1362 # Use the information collected in collect_manifests_and_files to say
1367 # Use the information collected in collect_manifests_and_files to say
1363 # which changenode any manifestnode belongs to.
1368 # which changenode any manifestnode belongs to.
1364 def lookup_manifest_link(mnfstnode):
1369 def lookup_manifest_link(mnfstnode):
1365 return msng_mnfst_set[mnfstnode]
1370 return msng_mnfst_set[mnfstnode]
1366
1371
1367 # A function generating function that sets up the initial environment
1372 # A function generating function that sets up the initial environment
1368 # the inner function.
1373 # the inner function.
1369 def filenode_collector(changedfiles):
1374 def filenode_collector(changedfiles):
1370 # This gathers information from each manifestnode included in the
1375 # This gathers information from each manifestnode included in the
1371 # changegroup about which filenodes the manifest node references
1376 # changegroup about which filenodes the manifest node references
1372 # so we can include those in the changegroup too.
1377 # so we can include those in the changegroup too.
1373 #
1378 #
1374 # It also remembers which changenode each filenode belongs to. It
1379 # It also remembers which changenode each filenode belongs to. It
1375 # does this by assuming the a filenode belongs to the changenode
1380 # does this by assuming the a filenode belongs to the changenode
1376 # the first manifest that references it belongs to.
1381 # the first manifest that references it belongs to.
1377 def collect_msng_filenodes(mnfstnode):
1382 def collect_msng_filenodes(mnfstnode):
1378 r = mnfst.rev(mnfstnode)
1383 r = mnfst.rev(mnfstnode)
1379 if r - 1 in mnfst.parentrevs(r):
1384 if r - 1 in mnfst.parentrevs(r):
1380 # If the previous rev is one of the parents,
1385 # If the previous rev is one of the parents,
1381 # we only need to see a diff.
1386 # we only need to see a diff.
1382 deltamf = mnfst.readdelta(mnfstnode)
1387 deltamf = mnfst.readdelta(mnfstnode)
1383 # For each line in the delta
1388 # For each line in the delta
1384 for f, fnode in deltamf.iteritems():
1389 for f, fnode in deltamf.iteritems():
1385 f = changedfiles.get(f, None)
1390 f = changedfiles.get(f, None)
1386 # And if the file is in the list of files we care
1391 # And if the file is in the list of files we care
1387 # about.
1392 # about.
1388 if f is not None:
1393 if f is not None:
1389 # Get the changenode this manifest belongs to
1394 # Get the changenode this manifest belongs to
1390 clnode = msng_mnfst_set[mnfstnode]
1395 clnode = msng_mnfst_set[mnfstnode]
1391 # Create the set of filenodes for the file if
1396 # Create the set of filenodes for the file if
1392 # there isn't one already.
1397 # there isn't one already.
1393 ndset = msng_filenode_set.setdefault(f, {})
1398 ndset = msng_filenode_set.setdefault(f, {})
1394 # And set the filenode's changelog node to the
1399 # And set the filenode's changelog node to the
1395 # manifest's if it hasn't been set already.
1400 # manifest's if it hasn't been set already.
1396 ndset.setdefault(fnode, clnode)
1401 ndset.setdefault(fnode, clnode)
1397 else:
1402 else:
1398 # Otherwise we need a full manifest.
1403 # Otherwise we need a full manifest.
1399 m = mnfst.read(mnfstnode)
1404 m = mnfst.read(mnfstnode)
1400 # For every file in we care about.
1405 # For every file in we care about.
1401 for f in changedfiles:
1406 for f in changedfiles:
1402 fnode = m.get(f, None)
1407 fnode = m.get(f, None)
1403 # If it's in the manifest
1408 # If it's in the manifest
1404 if fnode is not None:
1409 if fnode is not None:
1405 # See comments above.
1410 # See comments above.
1406 clnode = msng_mnfst_set[mnfstnode]
1411 clnode = msng_mnfst_set[mnfstnode]
1407 ndset = msng_filenode_set.setdefault(f, {})
1412 ndset = msng_filenode_set.setdefault(f, {})
1408 ndset.setdefault(fnode, clnode)
1413 ndset.setdefault(fnode, clnode)
1409 return collect_msng_filenodes
1414 return collect_msng_filenodes
1410
1415
1411 # We have a list of filenodes we think we need for a file, lets remove
1416 # We have a list of filenodes we think we need for a file, lets remove
1412 # all those we know the recipient must have.
1417 # all those we know the recipient must have.
1413 def prune_filenodes(f, filerevlog):
1418 def prune_filenodes(f, filerevlog):
1414 msngset = msng_filenode_set[f]
1419 msngset = msng_filenode_set[f]
1415 hasset = set()
1420 hasset = set()
1416 # If a 'missing' filenode thinks it belongs to a changenode we
1421 # If a 'missing' filenode thinks it belongs to a changenode we
1417 # assume the recipient must have, then the recipient must have
1422 # assume the recipient must have, then the recipient must have
1418 # that filenode.
1423 # that filenode.
1419 for n in msngset:
1424 for n in msngset:
1420 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1425 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1421 if clnode in has_cl_set:
1426 if clnode in has_cl_set:
1422 hasset.add(n)
1427 hasset.add(n)
1423 prune_parents(filerevlog, hasset, msngset)
1428 prune_parents(filerevlog, hasset, msngset)
1424
1429
1425 # A function generator function that sets up the a context for the
1430 # A function generator function that sets up the a context for the
1426 # inner function.
1431 # inner function.
1427 def lookup_filenode_link_func(fname):
1432 def lookup_filenode_link_func(fname):
1428 msngset = msng_filenode_set[fname]
1433 msngset = msng_filenode_set[fname]
1429 # Lookup the changenode the filenode belongs to.
1434 # Lookup the changenode the filenode belongs to.
1430 def lookup_filenode_link(fnode):
1435 def lookup_filenode_link(fnode):
1431 return msngset[fnode]
1436 return msngset[fnode]
1432 return lookup_filenode_link
1437 return lookup_filenode_link
1433
1438
1434 # Add the nodes that were explicitly requested.
1439 # Add the nodes that were explicitly requested.
1435 def add_extra_nodes(name, nodes):
1440 def add_extra_nodes(name, nodes):
1436 if not extranodes or name not in extranodes:
1441 if not extranodes or name not in extranodes:
1437 return
1442 return
1438
1443
1439 for node, linknode in extranodes[name]:
1444 for node, linknode in extranodes[name]:
1440 if node not in nodes:
1445 if node not in nodes:
1441 nodes[node] = linknode
1446 nodes[node] = linknode
1442
1447
1443 # Now that we have all theses utility functions to help out and
1448 # Now that we have all theses utility functions to help out and
1444 # logically divide up the task, generate the group.
1449 # logically divide up the task, generate the group.
1445 def gengroup():
1450 def gengroup():
1446 # The set of changed files starts empty.
1451 # The set of changed files starts empty.
1447 changedfiles = {}
1452 changedfiles = {}
1448 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1453 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1449
1454
1450 # Create a changenode group generator that will call our functions
1455 # Create a changenode group generator that will call our functions
1451 # back to lookup the owning changenode and collect information.
1456 # back to lookup the owning changenode and collect information.
1452 group = cl.group(msng_cl_lst, identity, collect)
1457 group = cl.group(msng_cl_lst, identity, collect)
1453 cnt = 0
1458 cnt = 0
1454 for chnk in group:
1459 for chnk in group:
1455 yield chnk
1460 yield chnk
1456 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1461 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1457 cnt += 1
1462 cnt += 1
1458 self.ui.progress(_('bundling changes'), None)
1463 self.ui.progress(_('bundling changes'), None)
1459
1464
1460
1465
1461 # Figure out which manifest nodes (of the ones we think might be
1466 # Figure out which manifest nodes (of the ones we think might be
1462 # part of the changegroup) the recipient must know about and
1467 # part of the changegroup) the recipient must know about and
1463 # remove them from the changegroup.
1468 # remove them from the changegroup.
1464 has_mnfst_set = set()
1469 has_mnfst_set = set()
1465 for n in msng_mnfst_set:
1470 for n in msng_mnfst_set:
1466 # If a 'missing' manifest thinks it belongs to a changenode
1471 # If a 'missing' manifest thinks it belongs to a changenode
1467 # the recipient is assumed to have, obviously the recipient
1472 # the recipient is assumed to have, obviously the recipient
1468 # must have that manifest.
1473 # must have that manifest.
1469 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1474 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1470 if linknode in has_cl_set:
1475 if linknode in has_cl_set:
1471 has_mnfst_set.add(n)
1476 has_mnfst_set.add(n)
1472 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1477 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1473 add_extra_nodes(1, msng_mnfst_set)
1478 add_extra_nodes(1, msng_mnfst_set)
1474 msng_mnfst_lst = msng_mnfst_set.keys()
1479 msng_mnfst_lst = msng_mnfst_set.keys()
1475 # Sort the manifestnodes by revision number.
1480 # Sort the manifestnodes by revision number.
1476 msng_mnfst_lst.sort(key=mnfst.rev)
1481 msng_mnfst_lst.sort(key=mnfst.rev)
1477 # Create a generator for the manifestnodes that calls our lookup
1482 # Create a generator for the manifestnodes that calls our lookup
1478 # and data collection functions back.
1483 # and data collection functions back.
1479 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1484 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1480 filenode_collector(changedfiles))
1485 filenode_collector(changedfiles))
1481 cnt = 0
1486 cnt = 0
1482 for chnk in group:
1487 for chnk in group:
1483 yield chnk
1488 yield chnk
1484 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1489 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1485 cnt += 1
1490 cnt += 1
1486 self.ui.progress(_('bundling manifests'), None)
1491 self.ui.progress(_('bundling manifests'), None)
1487
1492
1488 # These are no longer needed, dereference and toss the memory for
1493 # These are no longer needed, dereference and toss the memory for
1489 # them.
1494 # them.
1490 msng_mnfst_lst = None
1495 msng_mnfst_lst = None
1491 msng_mnfst_set.clear()
1496 msng_mnfst_set.clear()
1492
1497
1493 if extranodes:
1498 if extranodes:
1494 for fname in extranodes:
1499 for fname in extranodes:
1495 if isinstance(fname, int):
1500 if isinstance(fname, int):
1496 continue
1501 continue
1497 msng_filenode_set.setdefault(fname, {})
1502 msng_filenode_set.setdefault(fname, {})
1498 changedfiles[fname] = 1
1503 changedfiles[fname] = 1
1499 # Go through all our files in order sorted by name.
1504 # Go through all our files in order sorted by name.
1500 cnt = 0
1505 cnt = 0
1501 for fname in sorted(changedfiles):
1506 for fname in sorted(changedfiles):
1502 filerevlog = self.file(fname)
1507 filerevlog = self.file(fname)
1503 if not len(filerevlog):
1508 if not len(filerevlog):
1504 raise util.Abort(_("empty or missing revlog for %s") % fname)
1509 raise util.Abort(_("empty or missing revlog for %s") % fname)
1505 # Toss out the filenodes that the recipient isn't really
1510 # Toss out the filenodes that the recipient isn't really
1506 # missing.
1511 # missing.
1507 if fname in msng_filenode_set:
1512 if fname in msng_filenode_set:
1508 prune_filenodes(fname, filerevlog)
1513 prune_filenodes(fname, filerevlog)
1509 add_extra_nodes(fname, msng_filenode_set[fname])
1514 add_extra_nodes(fname, msng_filenode_set[fname])
1510 msng_filenode_lst = msng_filenode_set[fname].keys()
1515 msng_filenode_lst = msng_filenode_set[fname].keys()
1511 else:
1516 else:
1512 msng_filenode_lst = []
1517 msng_filenode_lst = []
1513 # If any filenodes are left, generate the group for them,
1518 # If any filenodes are left, generate the group for them,
1514 # otherwise don't bother.
1519 # otherwise don't bother.
1515 if len(msng_filenode_lst) > 0:
1520 if len(msng_filenode_lst) > 0:
1516 yield changegroup.chunkheader(len(fname))
1521 yield changegroup.chunkheader(len(fname))
1517 yield fname
1522 yield fname
1518 # Sort the filenodes by their revision #
1523 # Sort the filenodes by their revision #
1519 msng_filenode_lst.sort(key=filerevlog.rev)
1524 msng_filenode_lst.sort(key=filerevlog.rev)
1520 # Create a group generator and only pass in a changenode
1525 # Create a group generator and only pass in a changenode
1521 # lookup function as we need to collect no information
1526 # lookup function as we need to collect no information
1522 # from filenodes.
1527 # from filenodes.
1523 group = filerevlog.group(msng_filenode_lst,
1528 group = filerevlog.group(msng_filenode_lst,
1524 lookup_filenode_link_func(fname))
1529 lookup_filenode_link_func(fname))
1525 for chnk in group:
1530 for chnk in group:
1526 self.ui.progress(
1531 self.ui.progress(
1527 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1532 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1528 cnt += 1
1533 cnt += 1
1529 yield chnk
1534 yield chnk
1530 if fname in msng_filenode_set:
1535 if fname in msng_filenode_set:
1531 # Don't need this anymore, toss it to free memory.
1536 # Don't need this anymore, toss it to free memory.
1532 del msng_filenode_set[fname]
1537 del msng_filenode_set[fname]
1533 # Signal that no more groups are left.
1538 # Signal that no more groups are left.
1534 yield changegroup.closechunk()
1539 yield changegroup.closechunk()
1535 self.ui.progress(_('bundling files'), None)
1540 self.ui.progress(_('bundling files'), None)
1536
1541
1537 if msng_cl_lst:
1542 if msng_cl_lst:
1538 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1543 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1539
1544
1540 return util.chunkbuffer(gengroup())
1545 return util.chunkbuffer(gengroup())
1541
1546
1542 def changegroup(self, basenodes, source):
1547 def changegroup(self, basenodes, source):
1543 # to avoid a race we use changegroupsubset() (issue1320)
1548 # to avoid a race we use changegroupsubset() (issue1320)
1544 return self.changegroupsubset(basenodes, self.heads(), source)
1549 return self.changegroupsubset(basenodes, self.heads(), source)
1545
1550
1546 def _changegroup(self, nodes, source):
1551 def _changegroup(self, nodes, source):
1547 """Compute the changegroup of all nodes that we have that a recipient
1552 """Compute the changegroup of all nodes that we have that a recipient
1548 doesn't. Return a chunkbuffer object whose read() method will return
1553 doesn't. Return a chunkbuffer object whose read() method will return
1549 successive changegroup chunks.
1554 successive changegroup chunks.
1550
1555
1551 This is much easier than the previous function as we can assume that
1556 This is much easier than the previous function as we can assume that
1552 the recipient has any changenode we aren't sending them.
1557 the recipient has any changenode we aren't sending them.
1553
1558
1554 nodes is the set of nodes to send"""
1559 nodes is the set of nodes to send"""
1555
1560
1556 self.hook('preoutgoing', throw=True, source=source)
1561 self.hook('preoutgoing', throw=True, source=source)
1557
1562
1558 cl = self.changelog
1563 cl = self.changelog
1559 revset = set([cl.rev(n) for n in nodes])
1564 revset = set([cl.rev(n) for n in nodes])
1560 self.changegroupinfo(nodes, source)
1565 self.changegroupinfo(nodes, source)
1561
1566
1562 def identity(x):
1567 def identity(x):
1563 return x
1568 return x
1564
1569
1565 def gennodelst(log):
1570 def gennodelst(log):
1566 for r in log:
1571 for r in log:
1567 if log.linkrev(r) in revset:
1572 if log.linkrev(r) in revset:
1568 yield log.node(r)
1573 yield log.node(r)
1569
1574
1570 def lookuprevlink_func(revlog):
1575 def lookuprevlink_func(revlog):
1571 def lookuprevlink(n):
1576 def lookuprevlink(n):
1572 return cl.node(revlog.linkrev(revlog.rev(n)))
1577 return cl.node(revlog.linkrev(revlog.rev(n)))
1573 return lookuprevlink
1578 return lookuprevlink
1574
1579
1575 def gengroup():
1580 def gengroup():
1576 '''yield a sequence of changegroup chunks (strings)'''
1581 '''yield a sequence of changegroup chunks (strings)'''
1577 # construct a list of all changed files
1582 # construct a list of all changed files
1578 changedfiles = {}
1583 changedfiles = {}
1579 mmfs = {}
1584 mmfs = {}
1580 collect = changegroup.collector(cl, mmfs, changedfiles)
1585 collect = changegroup.collector(cl, mmfs, changedfiles)
1581
1586
1582 cnt = 0
1587 cnt = 0
1583 for chnk in cl.group(nodes, identity, collect):
1588 for chnk in cl.group(nodes, identity, collect):
1584 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1589 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1585 cnt += 1
1590 cnt += 1
1586 yield chnk
1591 yield chnk
1587 self.ui.progress(_('bundling changes'), None)
1592 self.ui.progress(_('bundling changes'), None)
1588
1593
1589 mnfst = self.manifest
1594 mnfst = self.manifest
1590 nodeiter = gennodelst(mnfst)
1595 nodeiter = gennodelst(mnfst)
1591 cnt = 0
1596 cnt = 0
1592 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1597 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1593 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1598 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1594 cnt += 1
1599 cnt += 1
1595 yield chnk
1600 yield chnk
1596 self.ui.progress(_('bundling manifests'), None)
1601 self.ui.progress(_('bundling manifests'), None)
1597
1602
1598 cnt = 0
1603 cnt = 0
1599 for fname in sorted(changedfiles):
1604 for fname in sorted(changedfiles):
1600 filerevlog = self.file(fname)
1605 filerevlog = self.file(fname)
1601 if not len(filerevlog):
1606 if not len(filerevlog):
1602 raise util.Abort(_("empty or missing revlog for %s") % fname)
1607 raise util.Abort(_("empty or missing revlog for %s") % fname)
1603 nodeiter = gennodelst(filerevlog)
1608 nodeiter = gennodelst(filerevlog)
1604 nodeiter = list(nodeiter)
1609 nodeiter = list(nodeiter)
1605 if nodeiter:
1610 if nodeiter:
1606 yield changegroup.chunkheader(len(fname))
1611 yield changegroup.chunkheader(len(fname))
1607 yield fname
1612 yield fname
1608 lookup = lookuprevlink_func(filerevlog)
1613 lookup = lookuprevlink_func(filerevlog)
1609 for chnk in filerevlog.group(nodeiter, lookup):
1614 for chnk in filerevlog.group(nodeiter, lookup):
1610 self.ui.progress(
1615 self.ui.progress(
1611 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1616 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1612 cnt += 1
1617 cnt += 1
1613 yield chnk
1618 yield chnk
1614 self.ui.progress(_('bundling files'), None)
1619 self.ui.progress(_('bundling files'), None)
1615
1620
1616 yield changegroup.closechunk()
1621 yield changegroup.closechunk()
1617
1622
1618 if nodes:
1623 if nodes:
1619 self.hook('outgoing', node=hex(nodes[0]), source=source)
1624 self.hook('outgoing', node=hex(nodes[0]), source=source)
1620
1625
1621 return util.chunkbuffer(gengroup())
1626 return util.chunkbuffer(gengroup())
1622
1627
1623 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1628 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1624 """Add the changegroup returned by source.read() to this repo.
1629 """Add the changegroup returned by source.read() to this repo.
1625 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1630 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1626 the URL of the repo where this changegroup is coming from.
1631 the URL of the repo where this changegroup is coming from.
1627
1632
1628 Return an integer summarizing the change to this repo:
1633 Return an integer summarizing the change to this repo:
1629 - nothing changed or no source: 0
1634 - nothing changed or no source: 0
1630 - more heads than before: 1+added heads (2..n)
1635 - more heads than before: 1+added heads (2..n)
1631 - fewer heads than before: -1-removed heads (-2..-n)
1636 - fewer heads than before: -1-removed heads (-2..-n)
1632 - number of heads stays the same: 1
1637 - number of heads stays the same: 1
1633 """
1638 """
1634 def csmap(x):
1639 def csmap(x):
1635 self.ui.debug("add changeset %s\n" % short(x))
1640 self.ui.debug("add changeset %s\n" % short(x))
1636 return len(cl)
1641 return len(cl)
1637
1642
1638 def revmap(x):
1643 def revmap(x):
1639 return cl.rev(x)
1644 return cl.rev(x)
1640
1645
1641 if not source:
1646 if not source:
1642 return 0
1647 return 0
1643
1648
1644 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1649 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1645
1650
1646 changesets = files = revisions = 0
1651 changesets = files = revisions = 0
1647 efiles = set()
1652 efiles = set()
1648
1653
1649 # write changelog data to temp files so concurrent readers will not see
1654 # write changelog data to temp files so concurrent readers will not see
1650 # inconsistent view
1655 # inconsistent view
1651 cl = self.changelog
1656 cl = self.changelog
1652 cl.delayupdate()
1657 cl.delayupdate()
1653 oldheads = len(cl.heads())
1658 oldheads = len(cl.heads())
1654
1659
1655 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1660 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1656 try:
1661 try:
1657 trp = weakref.proxy(tr)
1662 trp = weakref.proxy(tr)
1658 # pull off the changeset group
1663 # pull off the changeset group
1659 self.ui.status(_("adding changesets\n"))
1664 self.ui.status(_("adding changesets\n"))
1660 clstart = len(cl)
1665 clstart = len(cl)
1661 class prog(object):
1666 class prog(object):
1662 step = _('changesets')
1667 step = _('changesets')
1663 count = 1
1668 count = 1
1664 ui = self.ui
1669 ui = self.ui
1665 total = None
1670 total = None
1666 def __call__(self):
1671 def __call__(self):
1667 self.ui.progress(self.step, self.count, unit=_('chunks'),
1672 self.ui.progress(self.step, self.count, unit=_('chunks'),
1668 total=self.total)
1673 total=self.total)
1669 self.count += 1
1674 self.count += 1
1670 pr = prog()
1675 pr = prog()
1671 chunkiter = changegroup.chunkiter(source, progress=pr)
1676 chunkiter = changegroup.chunkiter(source, progress=pr)
1672 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1677 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1673 raise util.Abort(_("received changelog group is empty"))
1678 raise util.Abort(_("received changelog group is empty"))
1674 clend = len(cl)
1679 clend = len(cl)
1675 changesets = clend - clstart
1680 changesets = clend - clstart
1676 for c in xrange(clstart, clend):
1681 for c in xrange(clstart, clend):
1677 efiles.update(self[c].files())
1682 efiles.update(self[c].files())
1678 efiles = len(efiles)
1683 efiles = len(efiles)
1679 self.ui.progress(_('changesets'), None)
1684 self.ui.progress(_('changesets'), None)
1680
1685
1681 # pull off the manifest group
1686 # pull off the manifest group
1682 self.ui.status(_("adding manifests\n"))
1687 self.ui.status(_("adding manifests\n"))
1683 pr.step = _('manifests')
1688 pr.step = _('manifests')
1684 pr.count = 1
1689 pr.count = 1
1685 pr.total = changesets # manifests <= changesets
1690 pr.total = changesets # manifests <= changesets
1686 chunkiter = changegroup.chunkiter(source, progress=pr)
1691 chunkiter = changegroup.chunkiter(source, progress=pr)
1687 # no need to check for empty manifest group here:
1692 # no need to check for empty manifest group here:
1688 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1693 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1689 # no new manifest will be created and the manifest group will
1694 # no new manifest will be created and the manifest group will
1690 # be empty during the pull
1695 # be empty during the pull
1691 self.manifest.addgroup(chunkiter, revmap, trp)
1696 self.manifest.addgroup(chunkiter, revmap, trp)
1692 self.ui.progress(_('manifests'), None)
1697 self.ui.progress(_('manifests'), None)
1693
1698
1694 needfiles = {}
1699 needfiles = {}
1695 if self.ui.configbool('server', 'validate', default=False):
1700 if self.ui.configbool('server', 'validate', default=False):
1696 # validate incoming csets have their manifests
1701 # validate incoming csets have their manifests
1697 for cset in xrange(clstart, clend):
1702 for cset in xrange(clstart, clend):
1698 mfest = self.changelog.read(self.changelog.node(cset))[0]
1703 mfest = self.changelog.read(self.changelog.node(cset))[0]
1699 mfest = self.manifest.readdelta(mfest)
1704 mfest = self.manifest.readdelta(mfest)
1700 # store file nodes we must see
1705 # store file nodes we must see
1701 for f, n in mfest.iteritems():
1706 for f, n in mfest.iteritems():
1702 needfiles.setdefault(f, set()).add(n)
1707 needfiles.setdefault(f, set()).add(n)
1703
1708
1704 # process the files
1709 # process the files
1705 self.ui.status(_("adding file changes\n"))
1710 self.ui.status(_("adding file changes\n"))
1706 pr.step = 'files'
1711 pr.step = 'files'
1707 pr.count = 1
1712 pr.count = 1
1708 pr.total = efiles
1713 pr.total = efiles
1709 while 1:
1714 while 1:
1710 f = changegroup.getchunk(source)
1715 f = changegroup.getchunk(source)
1711 if not f:
1716 if not f:
1712 break
1717 break
1713 self.ui.debug("adding %s revisions\n" % f)
1718 self.ui.debug("adding %s revisions\n" % f)
1714 pr()
1719 pr()
1715 fl = self.file(f)
1720 fl = self.file(f)
1716 o = len(fl)
1721 o = len(fl)
1717 chunkiter = changegroup.chunkiter(source)
1722 chunkiter = changegroup.chunkiter(source)
1718 if fl.addgroup(chunkiter, revmap, trp) is None:
1723 if fl.addgroup(chunkiter, revmap, trp) is None:
1719 raise util.Abort(_("received file revlog group is empty"))
1724 raise util.Abort(_("received file revlog group is empty"))
1720 revisions += len(fl) - o
1725 revisions += len(fl) - o
1721 files += 1
1726 files += 1
1722 if f in needfiles:
1727 if f in needfiles:
1723 needs = needfiles[f]
1728 needs = needfiles[f]
1724 for new in xrange(o, len(fl)):
1729 for new in xrange(o, len(fl)):
1725 n = fl.node(new)
1730 n = fl.node(new)
1726 if n in needs:
1731 if n in needs:
1727 needs.remove(n)
1732 needs.remove(n)
1728 if not needs:
1733 if not needs:
1729 del needfiles[f]
1734 del needfiles[f]
1730 self.ui.progress(_('files'), None)
1735 self.ui.progress(_('files'), None)
1731
1736
1732 for f, needs in needfiles.iteritems():
1737 for f, needs in needfiles.iteritems():
1733 fl = self.file(f)
1738 fl = self.file(f)
1734 for n in needs:
1739 for n in needs:
1735 try:
1740 try:
1736 fl.rev(n)
1741 fl.rev(n)
1737 except error.LookupError:
1742 except error.LookupError:
1738 raise util.Abort(
1743 raise util.Abort(
1739 _('missing file data for %s:%s - run hg verify') %
1744 _('missing file data for %s:%s - run hg verify') %
1740 (f, hex(n)))
1745 (f, hex(n)))
1741
1746
1742 newheads = len(cl.heads())
1747 newheads = len(cl.heads())
1743 heads = ""
1748 heads = ""
1744 if oldheads and newheads != oldheads:
1749 if oldheads and newheads != oldheads:
1745 heads = _(" (%+d heads)") % (newheads - oldheads)
1750 heads = _(" (%+d heads)") % (newheads - oldheads)
1746
1751
1747 self.ui.status(_("added %d changesets"
1752 self.ui.status(_("added %d changesets"
1748 " with %d changes to %d files%s\n")
1753 " with %d changes to %d files%s\n")
1749 % (changesets, revisions, files, heads))
1754 % (changesets, revisions, files, heads))
1750
1755
1751 if changesets > 0:
1756 if changesets > 0:
1752 p = lambda: cl.writepending() and self.root or ""
1757 p = lambda: cl.writepending() and self.root or ""
1753 self.hook('pretxnchangegroup', throw=True,
1758 self.hook('pretxnchangegroup', throw=True,
1754 node=hex(cl.node(clstart)), source=srctype,
1759 node=hex(cl.node(clstart)), source=srctype,
1755 url=url, pending=p)
1760 url=url, pending=p)
1756
1761
1757 # make changelog see real files again
1762 # make changelog see real files again
1758 cl.finalize(trp)
1763 cl.finalize(trp)
1759
1764
1760 tr.close()
1765 tr.close()
1761 finally:
1766 finally:
1762 tr.release()
1767 tr.release()
1763 if lock:
1768 if lock:
1764 lock.release()
1769 lock.release()
1765
1770
1766 if changesets > 0:
1771 if changesets > 0:
1767 # forcefully update the on-disk branch cache
1772 # forcefully update the on-disk branch cache
1768 self.ui.debug("updating the branch cache\n")
1773 self.ui.debug("updating the branch cache\n")
1769 self.branchtags()
1774 self.branchtags()
1770 self.hook("changegroup", node=hex(cl.node(clstart)),
1775 self.hook("changegroup", node=hex(cl.node(clstart)),
1771 source=srctype, url=url)
1776 source=srctype, url=url)
1772
1777
1773 for i in xrange(clstart, clend):
1778 for i in xrange(clstart, clend):
1774 self.hook("incoming", node=hex(cl.node(i)),
1779 self.hook("incoming", node=hex(cl.node(i)),
1775 source=srctype, url=url)
1780 source=srctype, url=url)
1776
1781
1777 # never return 0 here:
1782 # never return 0 here:
1778 if newheads < oldheads:
1783 if newheads < oldheads:
1779 return newheads - oldheads - 1
1784 return newheads - oldheads - 1
1780 else:
1785 else:
1781 return newheads - oldheads + 1
1786 return newheads - oldheads + 1
1782
1787
1783
1788
1784 def stream_in(self, remote):
1789 def stream_in(self, remote):
1785 fp = remote.stream_out()
1790 fp = remote.stream_out()
1786 l = fp.readline()
1791 l = fp.readline()
1787 try:
1792 try:
1788 resp = int(l)
1793 resp = int(l)
1789 except ValueError:
1794 except ValueError:
1790 raise error.ResponseError(
1795 raise error.ResponseError(
1791 _('Unexpected response from remote server:'), l)
1796 _('Unexpected response from remote server:'), l)
1792 if resp == 1:
1797 if resp == 1:
1793 raise util.Abort(_('operation forbidden by server'))
1798 raise util.Abort(_('operation forbidden by server'))
1794 elif resp == 2:
1799 elif resp == 2:
1795 raise util.Abort(_('locking the remote repository failed'))
1800 raise util.Abort(_('locking the remote repository failed'))
1796 elif resp != 0:
1801 elif resp != 0:
1797 raise util.Abort(_('the server sent an unknown error code'))
1802 raise util.Abort(_('the server sent an unknown error code'))
1798 self.ui.status(_('streaming all changes\n'))
1803 self.ui.status(_('streaming all changes\n'))
1799 l = fp.readline()
1804 l = fp.readline()
1800 try:
1805 try:
1801 total_files, total_bytes = map(int, l.split(' ', 1))
1806 total_files, total_bytes = map(int, l.split(' ', 1))
1802 except (ValueError, TypeError):
1807 except (ValueError, TypeError):
1803 raise error.ResponseError(
1808 raise error.ResponseError(
1804 _('Unexpected response from remote server:'), l)
1809 _('Unexpected response from remote server:'), l)
1805 self.ui.status(_('%d files to transfer, %s of data\n') %
1810 self.ui.status(_('%d files to transfer, %s of data\n') %
1806 (total_files, util.bytecount(total_bytes)))
1811 (total_files, util.bytecount(total_bytes)))
1807 start = time.time()
1812 start = time.time()
1808 for i in xrange(total_files):
1813 for i in xrange(total_files):
1809 # XXX doesn't support '\n' or '\r' in filenames
1814 # XXX doesn't support '\n' or '\r' in filenames
1810 l = fp.readline()
1815 l = fp.readline()
1811 try:
1816 try:
1812 name, size = l.split('\0', 1)
1817 name, size = l.split('\0', 1)
1813 size = int(size)
1818 size = int(size)
1814 except (ValueError, TypeError):
1819 except (ValueError, TypeError):
1815 raise error.ResponseError(
1820 raise error.ResponseError(
1816 _('Unexpected response from remote server:'), l)
1821 _('Unexpected response from remote server:'), l)
1817 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1822 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1818 # for backwards compat, name was partially encoded
1823 # for backwards compat, name was partially encoded
1819 ofp = self.sopener(store.decodedir(name), 'w')
1824 ofp = self.sopener(store.decodedir(name), 'w')
1820 for chunk in util.filechunkiter(fp, limit=size):
1825 for chunk in util.filechunkiter(fp, limit=size):
1821 ofp.write(chunk)
1826 ofp.write(chunk)
1822 ofp.close()
1827 ofp.close()
1823 elapsed = time.time() - start
1828 elapsed = time.time() - start
1824 if elapsed <= 0:
1829 if elapsed <= 0:
1825 elapsed = 0.001
1830 elapsed = 0.001
1826 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1831 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1827 (util.bytecount(total_bytes), elapsed,
1832 (util.bytecount(total_bytes), elapsed,
1828 util.bytecount(total_bytes / elapsed)))
1833 util.bytecount(total_bytes / elapsed)))
1829 self.invalidate()
1834 self.invalidate()
1830 return len(self.heads()) + 1
1835 return len(self.heads()) + 1
1831
1836
1832 def clone(self, remote, heads=[], stream=False):
1837 def clone(self, remote, heads=[], stream=False):
1833 '''clone remote repository.
1838 '''clone remote repository.
1834
1839
1835 keyword arguments:
1840 keyword arguments:
1836 heads: list of revs to clone (forces use of pull)
1841 heads: list of revs to clone (forces use of pull)
1837 stream: use streaming clone if possible'''
1842 stream: use streaming clone if possible'''
1838
1843
1839 # now, all clients that can request uncompressed clones can
1844 # now, all clients that can request uncompressed clones can
1840 # read repo formats supported by all servers that can serve
1845 # read repo formats supported by all servers that can serve
1841 # them.
1846 # them.
1842
1847
1843 # if revlog format changes, client will have to check version
1848 # if revlog format changes, client will have to check version
1844 # and format flags on "stream" capability, and use
1849 # and format flags on "stream" capability, and use
1845 # uncompressed only if compatible.
1850 # uncompressed only if compatible.
1846
1851
1847 if stream and not heads and remote.capable('stream'):
1852 if stream and not heads and remote.capable('stream'):
1848 return self.stream_in(remote)
1853 return self.stream_in(remote)
1849 return self.pull(remote, heads)
1854 return self.pull(remote, heads)
1850
1855
1851 def pushkey(self, namespace, key, old, new):
1856 def pushkey(self, namespace, key, old, new):
1852 return pushkey.push(self, namespace, key, old, new)
1857 return pushkey.push(self, namespace, key, old, new)
1853
1858
1854 def listkeys(self, namespace):
1859 def listkeys(self, namespace):
1855 return pushkey.list(self, namespace)
1860 return pushkey.list(self, namespace)
1856
1861
1857 # used to avoid circular references so destructors work
1862 # used to avoid circular references so destructors work
1858 def aftertrans(files):
1863 def aftertrans(files):
1859 renamefiles = [tuple(t) for t in files]
1864 renamefiles = [tuple(t) for t in files]
1860 def a():
1865 def a():
1861 for src, dest in renamefiles:
1866 for src, dest in renamefiles:
1862 util.rename(src, dest)
1867 util.rename(src, dest)
1863 return a
1868 return a
1864
1869
1865 def instance(ui, path, create):
1870 def instance(ui, path, create):
1866 return localrepository(ui, util.drop_scheme('file', path), create)
1871 return localrepository(ui, util.drop_scheme('file', path), create)
1867
1872
1868 def islocal(path):
1873 def islocal(path):
1869 return True
1874 return True
@@ -1,241 +1,244 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 rm -rf sub
3 rm -rf sub
4 mkdir sub
4 mkdir sub
5 cd sub
5 cd sub
6 hg init t
6 hg init t
7 cd t
7 cd t
8
8
9 echo % first revision, no sub
9 echo % first revision, no sub
10 echo a > a
10 echo a > a
11 hg ci -Am0
11 hg ci -Am0
12
12
13 echo % add first sub
13 echo % add first sub
14 echo s = s > .hgsub
14 echo s = s > .hgsub
15 hg add .hgsub
15 hg add .hgsub
16 hg init s
16 hg init s
17 echo a > s/a
17 echo a > s/a
18
19 # issue2232 - committing a subrepo without .hgsub
20 hg ci -mbad s
21
18 hg -R s ci -Ams0
22 hg -R s ci -Ams0
19 hg sum
23 hg sum
20 hg ci -m1
24 hg ci -m1
21
25
22 # issue 2022 - update -C
26 # issue 2022 - update -C
23
24 echo b > s/a
27 echo b > s/a
25 hg sum
28 hg sum
26 hg co -C 1
29 hg co -C 1
27 hg sum
30 hg sum
28
31
29 echo % add sub sub
32 echo % add sub sub
30 echo ss = ss > s/.hgsub
33 echo ss = ss > s/.hgsub
31 hg init s/ss
34 hg init s/ss
32 echo a > s/ss/a
35 echo a > s/ss/a
33 hg -R s add s/.hgsub
36 hg -R s add s/.hgsub
34 hg -R s/ss add s/ss/a
37 hg -R s/ss add s/ss/a
35 hg sum
38 hg sum
36 hg ci -m2
39 hg ci -m2
37 hg sum
40 hg sum
38
41
39 echo % bump sub rev
42 echo % bump sub rev
40 echo b > s/a
43 echo b > s/a
41 hg -R s ci -ms1
44 hg -R s ci -ms1
42 hg ci -m3
45 hg ci -m3
43
46
44 echo % leave sub dirty
47 echo % leave sub dirty
45 echo c > s/a
48 echo c > s/a
46 hg ci -m4
49 hg ci -m4
47 hg tip -R s
50 hg tip -R s
48
51
49 echo % check caching
52 echo % check caching
50 hg co 0
53 hg co 0
51 hg debugsub
54 hg debugsub
52 echo % restore
55 echo % restore
53 hg co
56 hg co
54 hg debugsub
57 hg debugsub
55
58
56 echo % new branch for merge tests
59 echo % new branch for merge tests
57 hg co 1
60 hg co 1
58 echo t = t >> .hgsub
61 echo t = t >> .hgsub
59 hg init t
62 hg init t
60 echo t > t/t
63 echo t > t/t
61 hg -R t add t
64 hg -R t add t
62 echo % 5
65 echo % 5
63 hg ci -m5 # add sub
66 hg ci -m5 # add sub
64 echo t2 > t/t
67 echo t2 > t/t
65 echo % 6
68 echo % 6
66 hg st -R s
69 hg st -R s
67 hg ci -m6 # change sub
70 hg ci -m6 # change sub
68 hg debugsub
71 hg debugsub
69 echo t3 > t/t
72 echo t3 > t/t
70 echo % 7
73 echo % 7
71 hg ci -m7 # change sub again for conflict test
74 hg ci -m7 # change sub again for conflict test
72 hg rm .hgsub
75 hg rm .hgsub
73 echo % 8
76 echo % 8
74 hg ci -m8 # remove sub
77 hg ci -m8 # remove sub
75
78
76 echo % merge tests
79 echo % merge tests
77 hg co -C 3
80 hg co -C 3
78 hg merge 5 # test adding
81 hg merge 5 # test adding
79 hg debugsub
82 hg debugsub
80 hg ci -m9
83 hg ci -m9
81 hg merge 6 --debug # test change
84 hg merge 6 --debug # test change
82 hg debugsub
85 hg debugsub
83 echo conflict > t/t
86 echo conflict > t/t
84 hg ci -m10
87 hg ci -m10
85 HGMERGE=internal:merge hg merge --debug 7 # test conflict
88 HGMERGE=internal:merge hg merge --debug 7 # test conflict
86 echo % should conflict
89 echo % should conflict
87 cat t/t
90 cat t/t
88
91
89 echo % clone
92 echo % clone
90 cd ..
93 cd ..
91 hg clone t tc | sed 's|from .*/sub|from .../sub|g'
94 hg clone t tc | sed 's|from .*/sub|from .../sub|g'
92 cd tc
95 cd tc
93 hg debugsub
96 hg debugsub
94
97
95 echo % push
98 echo % push
96 echo bah > t/t
99 echo bah > t/t
97 hg ci -m11
100 hg ci -m11
98 hg push | sed 's/ .*sub/ ...sub/g'
101 hg push | sed 's/ .*sub/ ...sub/g'
99
102
100 echo % push -f
103 echo % push -f
101 echo bah > s/a
104 echo bah > s/a
102 hg ci -m12
105 hg ci -m12
103 hg push | sed 's/ .*sub/ ...sub/g'
106 hg push | sed 's/ .*sub/ ...sub/g'
104 hg push -f | sed 's/ .*sub/ ...sub/g'
107 hg push -f | sed 's/ .*sub/ ...sub/g'
105
108
106 echo % update
109 echo % update
107 cd ../t
110 cd ../t
108 hg up -C # discard our earlier merge
111 hg up -C # discard our earlier merge
109 echo blah > t/t
112 echo blah > t/t
110 hg ci -m13
113 hg ci -m13
111
114
112 echo % pull
115 echo % pull
113 cd ../tc
116 cd ../tc
114 hg pull | sed 's/ .*sub/ ...sub/g'
117 hg pull | sed 's/ .*sub/ ...sub/g'
115 # should pull t
118 # should pull t
116 hg up | sed 's|from .*/sub|from .../sub|g'
119 hg up | sed 's|from .*/sub|from .../sub|g'
117 cat t/t
120 cat t/t
118
121
119 echo % bogus subrepo path aborts
122 echo % bogus subrepo path aborts
120 echo 'bogus=[boguspath' >> .hgsub
123 echo 'bogus=[boguspath' >> .hgsub
121 hg ci -m 'bogus subrepo path'
124 hg ci -m 'bogus subrepo path'
122
125
123 echo % issue 1986
126 echo % issue 1986
124 cd ..
127 cd ..
125 rm -rf sub
128 rm -rf sub
126 hg init main
129 hg init main
127 cd main
130 cd main
128
131
129 hg init s # subrepo layout
132 hg init s # subrepo layout
130 cd s #
133 cd s #
131 echo a > a # o 5 br
134 echo a > a # o 5 br
132 hg ci -Am1 # /|
135 hg ci -Am1 # /|
133 hg branch br # o | 4 default
136 hg branch br # o | 4 default
134 echo a >> a # | |
137 echo a >> a # | |
135 hg ci -m1 # | o 3 br
138 hg ci -m1 # | o 3 br
136 hg up default # |/|
139 hg up default # |/|
137 echo b > b # o | 2 default
140 echo b > b # o | 2 default
138 hg ci -Am1 # | |
141 hg ci -Am1 # | |
139 hg up br # | o 1 br
142 hg up br # | o 1 br
140 hg merge tip # |/
143 hg merge tip # |/
141 hg ci -m1 # o 0 default
144 hg ci -m1 # o 0 default
142 hg up 2
145 hg up 2
143 echo c > c
146 echo c > c
144 hg ci -Am1
147 hg ci -Am1
145 hg up 3
148 hg up 3
146 hg merge 4
149 hg merge 4
147 hg ci -m1
150 hg ci -m1
148
151
149 cd .. # main repo layout:
152 cd .. # main repo layout:
150 echo 's = s' > .hgsub #
153 echo 's = s' > .hgsub #
151 hg -R s up 2 # * <-- try to merge default into br again
154 hg -R s up 2 # * <-- try to merge default into br again
152 hg ci -Am1 # .`|
155 hg ci -Am1 # .`|
153 hg branch br # . o 5 br --> substate = 5
156 hg branch br # . o 5 br --> substate = 5
154 echo b > b # . |
157 echo b > b # . |
155 hg -R s up 3 # o | 4 default --> substate = 4
158 hg -R s up 3 # o | 4 default --> substate = 4
156 hg ci -Am1 # | |
159 hg ci -Am1 # | |
157 hg up default # | o 3 br --> substate = 2
160 hg up default # | o 3 br --> substate = 2
158 echo c > c # |/|
161 echo c > c # |/|
159 hg ci -Am1 # o | 2 default --> substate = 2
162 hg ci -Am1 # o | 2 default --> substate = 2
160 hg up 1 # | |
163 hg up 1 # | |
161 hg merge 2 # | o 1 br --> substate = 3
164 hg merge 2 # | o 1 br --> substate = 3
162 hg ci -m1 # |/
165 hg ci -m1 # |/
163 hg up 2 # o 0 default --> substate = 2
166 hg up 2 # o 0 default --> substate = 2
164 hg -R s up 4
167 hg -R s up 4
165 echo d > d
168 echo d > d
166 hg ci -Am1
169 hg ci -Am1
167 hg up 3
170 hg up 3
168 hg -R s up 5
171 hg -R s up 5
169 echo e > e
172 echo e > e
170 hg ci -Am1
173 hg ci -Am1
171
174
172 hg up 5
175 hg up 5
173 hg merge 4 # try to merge default into br again
176 hg merge 4 # try to merge default into br again
174 cd ..
177 cd ..
175
178
176 echo % test subrepo delete from .hgsubstate
179 echo % test subrepo delete from .hgsubstate
177 hg init testdelete
180 hg init testdelete
178 mkdir testdelete/nested testdelete/nested2
181 mkdir testdelete/nested testdelete/nested2
179 hg init testdelete/nested
182 hg init testdelete/nested
180 hg init testdelete/nested2
183 hg init testdelete/nested2
181 echo test > testdelete/nested/foo
184 echo test > testdelete/nested/foo
182 echo test > testdelete/nested2/foo
185 echo test > testdelete/nested2/foo
183 hg -R testdelete/nested add
186 hg -R testdelete/nested add
184 hg -R testdelete/nested2 add
187 hg -R testdelete/nested2 add
185 hg -R testdelete/nested ci -m test
188 hg -R testdelete/nested ci -m test
186 hg -R testdelete/nested2 ci -m test
189 hg -R testdelete/nested2 ci -m test
187 echo nested = nested > testdelete/.hgsub
190 echo nested = nested > testdelete/.hgsub
188 echo nested2 = nested2 >> testdelete/.hgsub
191 echo nested2 = nested2 >> testdelete/.hgsub
189 hg -R testdelete add
192 hg -R testdelete add
190 hg -R testdelete ci -m "nested 1 & 2 added"
193 hg -R testdelete ci -m "nested 1 & 2 added"
191 echo nested = nested > testdelete/.hgsub
194 echo nested = nested > testdelete/.hgsub
192 hg -R testdelete ci -m "nested 2 deleted"
195 hg -R testdelete ci -m "nested 2 deleted"
193 cat testdelete/.hgsubstate | sed "s:.* ::"
196 cat testdelete/.hgsubstate | sed "s:.* ::"
194 hg -R testdelete remove testdelete/.hgsub
197 hg -R testdelete remove testdelete/.hgsub
195 hg -R testdelete ci -m ".hgsub deleted"
198 hg -R testdelete ci -m ".hgsub deleted"
196 cat testdelete/.hgsubstate
199 cat testdelete/.hgsubstate
197
200
198 echo % test repository cloning
201 echo % test repository cloning
199 mkdir mercurial mercurial2
202 mkdir mercurial mercurial2
200 hg init nested_absolute
203 hg init nested_absolute
201 echo test > nested_absolute/foo
204 echo test > nested_absolute/foo
202 hg -R nested_absolute add
205 hg -R nested_absolute add
203 hg -R nested_absolute ci -mtest
206 hg -R nested_absolute ci -mtest
204 cd mercurial
207 cd mercurial
205 hg init nested_relative
208 hg init nested_relative
206 echo test2 > nested_relative/foo2
209 echo test2 > nested_relative/foo2
207 hg -R nested_relative add
210 hg -R nested_relative add
208 hg -R nested_relative ci -mtest2
211 hg -R nested_relative ci -mtest2
209 hg init main
212 hg init main
210 echo "nested_relative = ../nested_relative" > main/.hgsub
213 echo "nested_relative = ../nested_relative" > main/.hgsub
211 echo "nested_absolute = `pwd`/nested_absolute" >> main/.hgsub
214 echo "nested_absolute = `pwd`/nested_absolute" >> main/.hgsub
212 hg -R main add
215 hg -R main add
213 hg -R main ci -m "add subrepos"
216 hg -R main ci -m "add subrepos"
214 cd ..
217 cd ..
215 hg clone mercurial/main mercurial2/main
218 hg clone mercurial/main mercurial2/main
216 cat mercurial2/main/nested_absolute/.hg/hgrc \
219 cat mercurial2/main/nested_absolute/.hg/hgrc \
217 mercurial2/main/nested_relative/.hg/hgrc \
220 mercurial2/main/nested_relative/.hg/hgrc \
218 | "$TESTDIR/filtertmp.py"
221 | "$TESTDIR/filtertmp.py"
219 rm -rf mercurial mercurial2
222 rm -rf mercurial mercurial2
220
223
221 echo % issue 1977
224 echo % issue 1977
222 hg init repo
225 hg init repo
223 hg init repo/s
226 hg init repo/s
224 echo a > repo/s/a
227 echo a > repo/s/a
225 hg -R repo/s ci -Am0
228 hg -R repo/s ci -Am0
226 echo s = s > repo/.hgsub
229 echo s = s > repo/.hgsub
227 hg -R repo ci -Am1
230 hg -R repo ci -Am1
228 hg clone repo repo2 | sed 's|from .*/sub|from .../sub|g'
231 hg clone repo repo2 | sed 's|from .*/sub|from .../sub|g'
229 hg -q -R repo2 pull -u
232 hg -q -R repo2 pull -u
230 echo 1 > repo2/s/a
233 echo 1 > repo2/s/a
231 hg -R repo2/s ci -m2
234 hg -R repo2/s ci -m2
232 hg -q -R repo2/s push
235 hg -q -R repo2/s push
233 hg -R repo2/s up -C 0
236 hg -R repo2/s up -C 0
234 echo 2 > repo2/s/a
237 echo 2 > repo2/s/a
235 hg -R repo2/s ci -m3
238 hg -R repo2/s ci -m3
236 hg -R repo2 ci -m3
239 hg -R repo2 ci -m3
237 hg -q -R repo2 push
240 hg -q -R repo2 push
238 hg -R repo update
241 hg -R repo update
239 rm -rf repo2 repo
242 rm -rf repo2 repo
240
243
241 exit 0
244 exit 0
@@ -1,302 +1,303 b''
1 % first revision, no sub
1 % first revision, no sub
2 adding a
2 adding a
3 % add first sub
3 % add first sub
4 abort: can't commit subrepos without .hgsub
4 adding a
5 adding a
5 parent: 0:f7b1eb17ad24 tip
6 parent: 0:f7b1eb17ad24 tip
6 0
7 0
7 branch: default
8 branch: default
8 commit: 1 added, 1 subrepos
9 commit: 1 added, 1 subrepos
9 update: (current)
10 update: (current)
10 committing subrepository s
11 committing subrepository s
11 parent: 1:7cf8cfea66e4 tip
12 parent: 1:7cf8cfea66e4 tip
12 1
13 1
13 branch: default
14 branch: default
14 commit: 1 subrepos
15 commit: 1 subrepos
15 update: (current)
16 update: (current)
16 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
17 parent: 1:7cf8cfea66e4 tip
18 parent: 1:7cf8cfea66e4 tip
18 1
19 1
19 branch: default
20 branch: default
20 commit: (clean)
21 commit: (clean)
21 update: (current)
22 update: (current)
22 % add sub sub
23 % add sub sub
23 parent: 1:7cf8cfea66e4 tip
24 parent: 1:7cf8cfea66e4 tip
24 1
25 1
25 branch: default
26 branch: default
26 commit: 1 subrepos
27 commit: 1 subrepos
27 update: (current)
28 update: (current)
28 committing subrepository s
29 committing subrepository s
29 committing subrepository s/ss
30 committing subrepository s/ss
30 parent: 2:df30734270ae tip
31 parent: 2:df30734270ae tip
31 2
32 2
32 branch: default
33 branch: default
33 commit: (clean)
34 commit: (clean)
34 update: (current)
35 update: (current)
35 % bump sub rev
36 % bump sub rev
36 committing subrepository s
37 committing subrepository s
37 % leave sub dirty
38 % leave sub dirty
38 committing subrepository s
39 committing subrepository s
39 changeset: 3:1c833a7a9e3a
40 changeset: 3:1c833a7a9e3a
40 tag: tip
41 tag: tip
41 user: test
42 user: test
42 date: Thu Jan 01 00:00:00 1970 +0000
43 date: Thu Jan 01 00:00:00 1970 +0000
43 summary: 4
44 summary: 4
44
45
45 % check caching
46 % check caching
46 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
47 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
47 % restore
48 % restore
48 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 path s
50 path s
50 source s
51 source s
51 revision 1c833a7a9e3a4445c711aaf0f012379cd0d4034e
52 revision 1c833a7a9e3a4445c711aaf0f012379cd0d4034e
52 % new branch for merge tests
53 % new branch for merge tests
53 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 adding t/t
55 adding t/t
55 % 5
56 % 5
56 committing subrepository t
57 committing subrepository t
57 created new head
58 created new head
58 % 6
59 % 6
59 committing subrepository t
60 committing subrepository t
60 path s
61 path s
61 source s
62 source s
62 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
63 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
63 path t
64 path t
64 source t
65 source t
65 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
66 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
66 % 7
67 % 7
67 committing subrepository t
68 committing subrepository t
68 % 8
69 % 8
69 % merge tests
70 % merge tests
70 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
71 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
71 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 (branch merge, don't forget to commit)
73 (branch merge, don't forget to commit)
73 path s
74 path s
74 source s
75 source s
75 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
76 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
76 path t
77 path t
77 source t
78 source t
78 revision 60ca1237c19474e7a3978b0dc1ca4e6f36d51382
79 revision 60ca1237c19474e7a3978b0dc1ca4e6f36d51382
79 created new head
80 created new head
80 searching for copies back to rev 2
81 searching for copies back to rev 2
81 resolving manifests
82 resolving manifests
82 overwrite None partial False
83 overwrite None partial False
83 ancestor 1f14a2e2d3ec local f0d2028bf86d+ remote 1831e14459c4
84 ancestor 1f14a2e2d3ec local f0d2028bf86d+ remote 1831e14459c4
84 .hgsubstate: versions differ -> m
85 .hgsubstate: versions differ -> m
85 update: .hgsubstate 1/1 files (100.00%)
86 update: .hgsubstate 1/1 files (100.00%)
86 subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec
87 subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec
87 subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg
88 subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg
88 getting subrepo t
89 getting subrepo t
89 resolving manifests
90 resolving manifests
90 overwrite True partial False
91 overwrite True partial False
91 ancestor 60ca1237c194+ local 60ca1237c194+ remote 6747d179aa9a
92 ancestor 60ca1237c194+ local 60ca1237c194+ remote 6747d179aa9a
92 t: remote is newer -> g
93 t: remote is newer -> g
93 update: t 1/1 files (100.00%)
94 update: t 1/1 files (100.00%)
94 getting t
95 getting t
95 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
96 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
96 (branch merge, don't forget to commit)
97 (branch merge, don't forget to commit)
97 path s
98 path s
98 source s
99 source s
99 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
100 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
100 path t
101 path t
101 source t
102 source t
102 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
103 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
103 committing subrepository t
104 committing subrepository t
104 searching for copies back to rev 2
105 searching for copies back to rev 2
105 resolving manifests
106 resolving manifests
106 overwrite None partial False
107 overwrite None partial False
107 ancestor 1831e14459c4 local e45c8b14af55+ remote f94576341bcf
108 ancestor 1831e14459c4 local e45c8b14af55+ remote f94576341bcf
108 .hgsubstate: versions differ -> m
109 .hgsubstate: versions differ -> m
109 update: .hgsubstate 1/1 files (100.00%)
110 update: .hgsubstate 1/1 files (100.00%)
110 subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4
111 subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4
111 subrepo t: both sides changed, merge with t:7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4:hg
112 subrepo t: both sides changed, merge with t:7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4:hg
112 merging subrepo t
113 merging subrepo t
113 searching for copies back to rev 2
114 searching for copies back to rev 2
114 resolving manifests
115 resolving manifests
115 overwrite None partial False
116 overwrite None partial False
116 ancestor 6747d179aa9a local 20a0db6fbf6c+ remote 7af322bc1198
117 ancestor 6747d179aa9a local 20a0db6fbf6c+ remote 7af322bc1198
117 t: versions differ -> m
118 t: versions differ -> m
118 preserving t for resolve of t
119 preserving t for resolve of t
119 update: t 1/1 files (100.00%)
120 update: t 1/1 files (100.00%)
120 picked tool 'internal:merge' for t (binary False symlink False)
121 picked tool 'internal:merge' for t (binary False symlink False)
121 merging t
122 merging t
122 my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
123 my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
123 warning: conflicts during merge.
124 warning: conflicts during merge.
124 merging t failed!
125 merging t failed!
125 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
126 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
126 use 'hg resolve' to retry unresolved file merges or 'hg update -C' to abandon
127 use 'hg resolve' to retry unresolved file merges or 'hg update -C' to abandon
127 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
128 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
128 (branch merge, don't forget to commit)
129 (branch merge, don't forget to commit)
129 % should conflict
130 % should conflict
130 <<<<<<< local
131 <<<<<<< local
131 conflict
132 conflict
132 =======
133 =======
133 t3
134 t3
134 >>>>>>> other
135 >>>>>>> other
135 % clone
136 % clone
136 updating to branch default
137 updating to branch default
137 pulling subrepo s from .../sub/t/s
138 pulling subrepo s from .../sub/t/s
138 requesting all changes
139 requesting all changes
139 adding changesets
140 adding changesets
140 adding manifests
141 adding manifests
141 adding file changes
142 adding file changes
142 added 4 changesets with 5 changes to 3 files
143 added 4 changesets with 5 changes to 3 files
143 pulling subrepo s/ss from .../sub/t/s/ss
144 pulling subrepo s/ss from .../sub/t/s/ss
144 requesting all changes
145 requesting all changes
145 adding changesets
146 adding changesets
146 adding manifests
147 adding manifests
147 adding file changes
148 adding file changes
148 added 1 changesets with 1 changes to 1 files
149 added 1 changesets with 1 changes to 1 files
149 pulling subrepo t from .../sub/t/t
150 pulling subrepo t from .../sub/t/t
150 requesting all changes
151 requesting all changes
151 adding changesets
152 adding changesets
152 adding manifests
153 adding manifests
153 adding file changes
154 adding file changes
154 added 4 changesets with 4 changes to 1 files (+1 heads)
155 added 4 changesets with 4 changes to 1 files (+1 heads)
155 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
156 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
156 path s
157 path s
157 source s
158 source s
158 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
159 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
159 path t
160 path t
160 source t
161 source t
161 revision 20a0db6fbf6c3d2836e6519a642ae929bfc67c0e
162 revision 20a0db6fbf6c3d2836e6519a642ae929bfc67c0e
162 % push
163 % push
163 committing subrepository t
164 committing subrepository t
164 pushing ...sub/t
165 pushing ...sub/t
165 pushing ...sub/t/s/ss
166 pushing ...sub/t/s/ss
166 searching for changes
167 searching for changes
167 no changes found
168 no changes found
168 pushing ...sub/t/s
169 pushing ...sub/t/s
169 searching for changes
170 searching for changes
170 no changes found
171 no changes found
171 pushing ...sub/t/t
172 pushing ...sub/t/t
172 searching for changes
173 searching for changes
173 adding changesets
174 adding changesets
174 adding manifests
175 adding manifests
175 adding file changes
176 adding file changes
176 added 1 changesets with 1 changes to 1 files
177 added 1 changesets with 1 changes to 1 files
177 searching for changes
178 searching for changes
178 adding changesets
179 adding changesets
179 adding manifests
180 adding manifests
180 adding file changes
181 adding file changes
181 added 1 changesets with 1 changes to 1 files
182 added 1 changesets with 1 changes to 1 files
182 % push -f
183 % push -f
183 committing subrepository s
184 committing subrepository s
184 abort: push creates new remote heads on branch 'default'!
185 abort: push creates new remote heads on branch 'default'!
185 pushing ...sub/t
186 pushing ...sub/t
186 pushing ...sub/t/s/ss
187 pushing ...sub/t/s/ss
187 searching for changes
188 searching for changes
188 no changes found
189 no changes found
189 pushing ...sub/t/s
190 pushing ...sub/t/s
190 searching for changes
191 searching for changes
191 (did you forget to merge? use push -f to force)
192 (did you forget to merge? use push -f to force)
192 pushing ...sub/t
193 pushing ...sub/t
193 pushing ...sub/t/s/ss
194 pushing ...sub/t/s/ss
194 searching for changes
195 searching for changes
195 no changes found
196 no changes found
196 pushing ...sub/t/s
197 pushing ...sub/t/s
197 searching for changes
198 searching for changes
198 adding changesets
199 adding changesets
199 adding manifests
200 adding manifests
200 adding file changes
201 adding file changes
201 added 1 changesets with 1 changes to 1 files (+1 heads)
202 added 1 changesets with 1 changes to 1 files (+1 heads)
202 pushing ...sub/t/t
203 pushing ...sub/t/t
203 searching for changes
204 searching for changes
204 no changes found
205 no changes found
205 searching for changes
206 searching for changes
206 adding changesets
207 adding changesets
207 adding manifests
208 adding manifests
208 adding file changes
209 adding file changes
209 added 1 changesets with 1 changes to 1 files
210 added 1 changesets with 1 changes to 1 files
210 % update
211 % update
211 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
212 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
212 committing subrepository t
213 committing subrepository t
213 % pull
214 % pull
214 pulling ...sub/t
215 pulling ...sub/t
215 searching for changes
216 searching for changes
216 adding changesets
217 adding changesets
217 adding manifests
218 adding manifests
218 adding file changes
219 adding file changes
219 added 1 changesets with 1 changes to 1 files
220 added 1 changesets with 1 changes to 1 files
220 (run 'hg update' to get a working copy)
221 (run 'hg update' to get a working copy)
221 pulling subrepo t from .../sub/t/t
222 pulling subrepo t from .../sub/t/t
222 searching for changes
223 searching for changes
223 adding changesets
224 adding changesets
224 adding manifests
225 adding manifests
225 adding file changes
226 adding file changes
226 added 1 changesets with 1 changes to 1 files
227 added 1 changesets with 1 changes to 1 files
227 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
228 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
228 blah
229 blah
229 % bogus subrepo path aborts
230 % bogus subrepo path aborts
230 abort: missing ] in subrepo source
231 abort: missing ] in subrepo source
231 % issue 1986
232 % issue 1986
232 adding a
233 adding a
233 marked working directory as branch br
234 marked working directory as branch br
234 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
235 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
235 adding b
236 adding b
236 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
237 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
237 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
238 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
238 (branch merge, don't forget to commit)
239 (branch merge, don't forget to commit)
239 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
240 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
240 adding c
241 adding c
241 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
242 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
242 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
243 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
243 (branch merge, don't forget to commit)
244 (branch merge, don't forget to commit)
244 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
245 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
245 adding .hgsub
246 adding .hgsub
246 committing subrepository s
247 committing subrepository s
247 marked working directory as branch br
248 marked working directory as branch br
248 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
249 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
249 adding b
250 adding b
250 committing subrepository s
251 committing subrepository s
251 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
252 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
252 adding c
253 adding c
253 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
254 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
254 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
255 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
255 (branch merge, don't forget to commit)
256 (branch merge, don't forget to commit)
256 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
257 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
257 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
258 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
258 adding d
259 adding d
259 committing subrepository s
260 committing subrepository s
260 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
261 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
261 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
262 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
262 adding e
263 adding e
263 committing subrepository s
264 committing subrepository s
264 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
265 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
265 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
266 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
266 (branch merge, don't forget to commit)
267 (branch merge, don't forget to commit)
267 % test subrepo delete from .hgsubstate
268 % test subrepo delete from .hgsubstate
268 adding testdelete/nested/foo
269 adding testdelete/nested/foo
269 adding testdelete/nested2/foo
270 adding testdelete/nested2/foo
270 adding testdelete/.hgsub
271 adding testdelete/.hgsub
271 committing subrepository nested2
272 committing subrepository nested2
272 committing subrepository nested
273 committing subrepository nested
273 nested
274 nested
274 % test repository cloning
275 % test repository cloning
275 adding nested_absolute/foo
276 adding nested_absolute/foo
276 adding nested_relative/foo2
277 adding nested_relative/foo2
277 adding main/.hgsub
278 adding main/.hgsub
278 committing subrepository nested_relative
279 committing subrepository nested_relative
279 committing subrepository nested_absolute
280 committing subrepository nested_absolute
280 updating to branch default
281 updating to branch default
281 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
282 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
282 [paths]
283 [paths]
283 default = $HGTMP/test-subrepo/sub/mercurial/nested_absolute
284 default = $HGTMP/test-subrepo/sub/mercurial/nested_absolute
284 [paths]
285 [paths]
285 default = $HGTMP/test-subrepo/sub/mercurial/nested_relative
286 default = $HGTMP/test-subrepo/sub/mercurial/nested_relative
286 % issue 1977
287 % issue 1977
287 adding a
288 adding a
288 adding .hgsub
289 adding .hgsub
289 committing subrepository s
290 committing subrepository s
290 updating to branch default
291 updating to branch default
291 pulling subrepo s from .../sub/repo/s
292 pulling subrepo s from .../sub/repo/s
292 requesting all changes
293 requesting all changes
293 adding changesets
294 adding changesets
294 adding manifests
295 adding manifests
295 adding file changes
296 adding file changes
296 added 1 changesets with 1 changes to 1 files
297 added 1 changesets with 1 changes to 1 files
297 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
298 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
298 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
299 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
299 created new head
300 created new head
300 committing subrepository s
301 committing subrepository s
301 abort: push creates new remote heads on branch 'default'!
302 abort: push creates new remote heads on branch 'default'!
302 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
303 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
General Comments 0
You need to be logged in to leave comments. Login now