tags: silence warning about unknown tags...
Matt Mackall
r8857:5096a47d default
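
This hunk drops the warning readtags emitted for a .hgtags entry whose
node is not present locally; "hg pull -r" can legitimately create that
situation, so such entries are now skipped silently. A minimal sketch of
the logic, assuming a toy nodemap (the hash and names are stand-ins, not
Mercurial APIs):

    from binascii import unhexlify as bin  # what hg's node.bin is

    nodemap = set()             # pretend the local changelog has no nodes
    line = 'aa' * 20 + ' v1.0'  # a .hgtags entry fetched from elsewhere
    node, key = line.split(' ', 1)
    if bin(node) not in nodemap:
        # before this change: warn("tag '%s' refers to unknown node" % key)
        pass                    # after: ignore the entry and continue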
@@ -1,2176 +1,2176 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

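    # Each entry writetags emits above is a single "<hex node> <name>"
    # line; an illustrative .hgtags line (hypothetical hash):
    #   6c81ed0049f86ecc... v1.0
    # Re-tagging appends the previous node first, so readers can
    # reconstruct the tag's history.
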
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

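    # A minimal usage sketch (hypothetical values; the node argument is a
    # binary changeset node, e.g. repo.lookup('tip')):
    #   repo.tag('v1.0', repo.lookup('tip'), 'Added tag v1.0',
    #            local=False, user='someone', date=None)
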
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
-                   warn(_("tag '%s' refers to unknown node") % key)
+                   # silently ignore as pull -r might cause this
                    continue


                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        seen = set()
        f = None
        ctxs = []
        for node in self.heads():
            try:
                fnode = self[node].filenode('.hgtags')
            except error.LookupError:
                continue
            if fnode not in seen:
                seen.add(fnode)
                if not f:
                    f = self.filectx('.hgtags', fileid=fnode)
                else:
                    f = f.filectx(fnode)
                ctxs.append(f)

        # read the tags file from each head, ending with the tip
        for f in reversed(ctxs):
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

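    # branchheads.cache layout as written above: a "<tip hex> <tip rev>"
    # header line, then one "<head hex> <branch>" line per head, e.g.
    # (hypothetical hashes):
    #   9b2a99adc05e... 1041
    #   9b2a99adc05e... default
    #   88f21da5a632... stable
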
    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            bheads = partial.setdefault(b, [])
            bheads.append(c.node())
            for p in c.parents():
                pn = p.node()
                if pn in bheads:
                    bheads.remove(pn)

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)

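    # Resolution order in lookup above: integer revision, '.', 'null',
    # 'tip', exact node match, tag, branch, then unambiguous node prefix;
    # so repo.lookup('default') finds the branch only if no tag of that
    # name exists.
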
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

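    # The section read by _filter is named after its 'filter' argument
    # ("encode" or "decode" below); a hypothetical hgrc entry piping text
    # files through an external command might look like:
    #   [encode]
    #   **.txt = dos2unix
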
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

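    # On a clean close, aftertrans renames the journal.* files written
    # above to undo.*; rollback() below replays those undo files, while
    # recover() replays a journal left behind by an interrupted run.
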
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

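    # For a recorded rename, the metadata assembled above ends up in the
    # filelog entry as, e.g. (hypothetical revision hash):
    #   meta = {'copy': 'foo', 'copyrev': 'b80de5d13875...'}
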
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                self.ui.status(_("nothing changed\n"))
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            ret = self.commitctx(cctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()

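    # A minimal commit() sketch (hypothetical file name; match_.exact
    # narrows the commit the same way _tag does for .hgtags):
    #   m = match_.exact(repo.root, '', ['a.txt'])
    #   node = repo.commit(text='update a.txt', user='someone', match=m)
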
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1004
1004
1005 # check for any possibly clean files
1005 # check for any possibly clean files
1006 if parentworking and cmp:
1006 if parentworking and cmp:
1007 fixup = []
1007 fixup = []
1008 # do a full compare of any files that might have changed
1008 # do a full compare of any files that might have changed
1009 for f in sorted(cmp):
1009 for f in sorted(cmp):
1010 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1010 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1011 or ctx1[f].cmp(ctx2[f].data())):
1011 or ctx1[f].cmp(ctx2[f].data())):
1012 modified.append(f)
1012 modified.append(f)
1013 else:
1013 else:
1014 fixup.append(f)
1014 fixup.append(f)
1015
1015
1016 if listclean:
1016 if listclean:
1017 clean += fixup
1017 clean += fixup
1018
1018
1019 # update dirstate for files that are actually clean
1019 # update dirstate for files that are actually clean
1020 if fixup:
1020 if fixup:
1021 try:
1021 try:
1022 # updating the dirstate is optional
1022 # updating the dirstate is optional
1023 # so we don't wait on the lock
1023 # so we don't wait on the lock
1024 wlock = self.wlock(False)
1024 wlock = self.wlock(False)
1025 try:
1025 try:
1026 for f in fixup:
1026 for f in fixup:
1027 self.dirstate.normal(f)
1027 self.dirstate.normal(f)
1028 finally:
1028 finally:
1029 wlock.release()
1029 wlock.release()
1030 except error.LockError:
1030 except error.LockError:
1031 pass
1031 pass
1032
1032
1033 if not parentworking:
1033 if not parentworking:
1034 mf1 = mfmatches(ctx1)
1034 mf1 = mfmatches(ctx1)
1035 if working:
1035 if working:
1036 # we are comparing working dir against non-parent
1036 # we are comparing working dir against non-parent
1037 # generate a pseudo-manifest for the working dir
1037 # generate a pseudo-manifest for the working dir
1038 mf2 = mfmatches(self['.'])
1038 mf2 = mfmatches(self['.'])
1039 for f in cmp + modified + added:
1039 for f in cmp + modified + added:
1040 mf2[f] = None
1040 mf2[f] = None
1041 mf2.set(f, ctx2.flags(f))
1041 mf2.set(f, ctx2.flags(f))
1042 for f in removed:
1042 for f in removed:
1043 if f in mf2:
1043 if f in mf2:
1044 del mf2[f]
1044 del mf2[f]
1045 else:
1045 else:
1046 # we are comparing two revisions
1046 # we are comparing two revisions
1047 deleted, unknown, ignored = [], [], []
1047 deleted, unknown, ignored = [], [], []
1048 mf2 = mfmatches(ctx2)
1048 mf2 = mfmatches(ctx2)
1049
1049
1050 modified, added, clean = [], [], []
1050 modified, added, clean = [], [], []
1051 for fn in mf2:
1051 for fn in mf2:
1052 if fn in mf1:
1052 if fn in mf1:
1053 if (mf1.flags(fn) != mf2.flags(fn) or
1053 if (mf1.flags(fn) != mf2.flags(fn) or
1054 (mf1[fn] != mf2[fn] and
1054 (mf1[fn] != mf2[fn] and
1055 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1055 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1056 modified.append(fn)
1056 modified.append(fn)
1057 elif listclean:
1057 elif listclean:
1058 clean.append(fn)
1058 clean.append(fn)
1059 del mf1[fn]
1059 del mf1[fn]
1060 else:
1060 else:
1061 added.append(fn)
1061 added.append(fn)
1062 removed = mf1.keys()
1062 removed = mf1.keys()
1063
1063
1064 r = modified, added, removed, deleted, unknown, ignored, clean
1064 r = modified, added, removed, deleted, unknown, ignored, clean
1065 [l.sort() for l in r]
1065 [l.sort() for l in r]
1066 return r
1066 return r
1067
1067
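    # Illustrative use of status() (a sketch, not from the original file;
    # `repo` is assumed to be a localrepository instance):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(node1='.', node2=None, unknown=True, clean=True)
    #
    # With node2=None the comparison is against the working directory, so
    # the deleted/unknown/ignored lists can be populated when requested.
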
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

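    # The one-letter codes tested against self.dirstate[f] above are
    # dirstate states: 'n' = normal (tracked), 'a' = added, 'r' = removed,
    # 'm' = merged from the second parent, '?' = untracked; re-adding a
    # file marked 'r' simply flips it back via normallookup().
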
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

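    # The negated revision number is a decorate-sort-undecorate idiom:
    # sorting (-rev, node) pairs ascending yields nodes in descending
    # revision order, e.g. heads at revs 3 and 7 decorate to (-3, a) and
    # (-7, b), sort to [(-7, b), (-3, a)], and undecorate to [b, a].
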
    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

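    # between() samples each top..bottom chain at exponentially growing
    # gaps: it records the nodes 1, 2, 4, 8, ... steps below top (whenever
    # i == f, after which f doubles). That spacing is what lets the
    # discovery code below narrow long linear runs of history with a
    # logarithmic number of round trips.
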
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
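        # Rough shape of the result (a sketch based on the docstring and
        # the pull() call site, not a comment from the original file):
        #
        #   common, fetch, rheads = repo.findcommonincoming(remote)
        #   # common: nodes known to both repositories
        #   # fetch:  roots of the changesets missing locally
        #   # rheads: the remote heads that were examined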
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

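    # Illustrative caller (a sketch, not from the original file; `other`
    # is assumed to be a peer repository, e.g. one opened elsewhere via
    # hg.repository(ui, url)):
    #
    #   if repo.pull(other) == 0:
    #       repo.ui.status("nothing to pull\n")
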
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n")
                                 % self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
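        # Illustrative shape of extranodes per the docstring above (a
        # sketch; fnode/mnode/clnode stand for a filelog node, a manifest
        # node and the changelog node to transmit as their linkrev):
        #
        #   extranodes = {
        #       'path/to/file': [(fnode, clnode)],
        #       1:              [(mnode, clnode)],  # key 1 is the manifest
        #   }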

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed
            # to know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

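        # For example, nodelist.sort(cmp_by_rev_func(cl)) orders changelog
        # nodes oldest-first by revision number, which is both the cheapest
        # read order for the revlog and a valid topological order.
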
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

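        # E.g. once the recipient is known to have node N, it necessarily
        # has every ancestor of N as well, so prune_parents() walks N's
        # ancestry and drops all of it from the missing-set.
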
        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
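            # (next_rev is a one-element list so the nested function below
            # can rebind its value across calls; Python 2 closures have no
            # `nonlocal` statement.)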
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode that
            # the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

1845 # Now that we have all theses utility functions to help out and
1845 # Now that we have all theses utility functions to help out and
1846 # logically divide up the task, generate the group.
1846 # logically divide up the task, generate the group.
1847 def gengroup():
1847 def gengroup():
1848 # The set of changed files starts empty.
1848 # The set of changed files starts empty.
1849 changedfiles = {}
1849 changedfiles = {}
1850 # Create a changenode group generator that will call our functions
1850 # Create a changenode group generator that will call our functions
1851 # back to lookup the owning changenode and collect information.
1851 # back to lookup the owning changenode and collect information.
1852 group = cl.group(msng_cl_lst, identity,
1852 group = cl.group(msng_cl_lst, identity,
1853 manifest_and_file_collector(changedfiles))
1853 manifest_and_file_collector(changedfiles))
1854 for chnk in group:
1854 for chnk in group:
1855 yield chnk
1855 yield chnk
1856
1856
1857 # The list of manifests has been collected by the generator
1857 # The list of manifests has been collected by the generator
1858 # calling our functions back.
1858 # calling our functions back.
1859 prune_manifests()
1859 prune_manifests()
1860 add_extra_nodes(1, msng_mnfst_set)
1860 add_extra_nodes(1, msng_mnfst_set)
1861 msng_mnfst_lst = msng_mnfst_set.keys()
1861 msng_mnfst_lst = msng_mnfst_set.keys()
1862 # Sort the manifestnodes by revision number.
1862 # Sort the manifestnodes by revision number.
1863 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1863 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1864 # Create a generator for the manifestnodes that calls our lookup
1864 # Create a generator for the manifestnodes that calls our lookup
1865 # and data collection functions back.
1865 # and data collection functions back.
1866 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1866 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1867 filenode_collector(changedfiles))
1867 filenode_collector(changedfiles))
1868 for chnk in group:
1868 for chnk in group:
1869 yield chnk
1869 yield chnk
1870
1870
1871 # These are no longer needed; dereference and toss the memory for
1871 # These are no longer needed; dereference and toss the memory for
1872 # them.
1872 # them.
1873 msng_mnfst_lst = None
1873 msng_mnfst_lst = None
1874 msng_mnfst_set.clear()
1874 msng_mnfst_set.clear()
1875
1875
1876 if extranodes:
1876 if extranodes:
1877 for fname in extranodes:
1877 for fname in extranodes:
1878 if isinstance(fname, int):
1878 if isinstance(fname, int):
1879 continue
1879 continue
1880 msng_filenode_set.setdefault(fname, {})
1880 msng_filenode_set.setdefault(fname, {})
1881 changedfiles[fname] = 1
1881 changedfiles[fname] = 1
1882 # Go through all our files in order sorted by name.
1882 # Go through all our files in order sorted by name.
1883 for fname in sorted(changedfiles):
1883 for fname in sorted(changedfiles):
1884 filerevlog = self.file(fname)
1884 filerevlog = self.file(fname)
1885 if not len(filerevlog):
1885 if not len(filerevlog):
1886 raise util.Abort(_("empty or missing revlog for %s") % fname)
1886 raise util.Abort(_("empty or missing revlog for %s") % fname)
1887 # Toss out the filenodes that the recipient isn't really
1887 # Toss out the filenodes that the recipient isn't really
1888 # missing.
1888 # missing.
1889 if fname in msng_filenode_set:
1889 if fname in msng_filenode_set:
1890 prune_filenodes(fname, filerevlog)
1890 prune_filenodes(fname, filerevlog)
1891 add_extra_nodes(fname, msng_filenode_set[fname])
1891 add_extra_nodes(fname, msng_filenode_set[fname])
1892 msng_filenode_lst = msng_filenode_set[fname].keys()
1892 msng_filenode_lst = msng_filenode_set[fname].keys()
1893 else:
1893 else:
1894 msng_filenode_lst = []
1894 msng_filenode_lst = []
1895 # If any filenodes are left, generate the group for them,
1895 # If any filenodes are left, generate the group for them,
1896 # otherwise don't bother.
1896 # otherwise don't bother.
1897 if len(msng_filenode_lst) > 0:
1897 if len(msng_filenode_lst) > 0:
1898 yield changegroup.chunkheader(len(fname))
1898 yield changegroup.chunkheader(len(fname))
1899 yield fname
1899 yield fname
1900 # Sort the filenodes by their revision number
1900 # Sort the filenodes by their revision number
1901 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1901 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1902 # Create a group generator and only pass in a changenode
1902 # Create a group generator and only pass in a changenode
1903 # lookup function as we need to collect no information
1903 # lookup function as we need to collect no information
1904 # from filenodes.
1904 # from filenodes.
1905 group = filerevlog.group(msng_filenode_lst,
1905 group = filerevlog.group(msng_filenode_lst,
1906 lookup_filenode_link_func(fname))
1906 lookup_filenode_link_func(fname))
1907 for chnk in group:
1907 for chnk in group:
1908 yield chnk
1908 yield chnk
1909 if fname in msng_filenode_set:
1909 if fname in msng_filenode_set:
1910 # Don't need this anymore, toss it to free memory.
1910 # Don't need this anymore, toss it to free memory.
1911 del msng_filenode_set[fname]
1911 del msng_filenode_set[fname]
1912 # Signal that no more groups are left.
1912 # Signal that no more groups are left.
1913 yield changegroup.closechunk()
1913 yield changegroup.closechunk()
1914
1914
1915 if msng_cl_lst:
1915 if msng_cl_lst:
1916 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1916 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1917
1917
1918 return util.chunkbuffer(gengroup())
1918 return util.chunkbuffer(gengroup())
1919
1919
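The generators above frame their output with changegroup.chunkheader() and
changegroup.closechunk(). As a hedged sketch (assuming the changegroup v1
wire format: each chunk carries a 4-byte big-endian length prefix that
counts itself, and a length of four or less marks the end of a group),
reading a single chunk back looks roughly like this:

    import struct

    def read_chunk(fp):
        # Read the 4-byte big-endian length header; EOF means no more data.
        d = fp.read(4)
        if not d:
            return ""
        l = struct.unpack(">l", d)[0]
        # The length counts the header itself; 4 or less is a closechunk,
        # i.e. the end of the current group.
        if l <= 4:
            return ""
        return fp.read(l - 4)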
1920 def changegroup(self, basenodes, source):
1920 def changegroup(self, basenodes, source):
1921 # to avoid a race we use changegroupsubset() (issue1320)
1921 # to avoid a race we use changegroupsubset() (issue1320)
1922 return self.changegroupsubset(basenodes, self.heads(), source)
1922 return self.changegroupsubset(basenodes, self.heads(), source)
1923
1923
1924 def _changegroup(self, common, source):
1924 def _changegroup(self, common, source):
1925 """Generate a changegroup of all nodes that we have that a recipient
1925 """Generate a changegroup of all nodes that we have that a recipient
1926 doesn't.
1926 doesn't.
1927
1927
1928 This is much easier than the previous function as we can assume that
1928 This is much easier than the previous function as we can assume that
1929 the recipient has any changenode we aren't sending them.
1929 the recipient has any changenode we aren't sending them.
1930
1930
1931 common is the set of common nodes between remote and self"""
1931 common is the set of common nodes between remote and self"""
1932
1932
1933 self.hook('preoutgoing', throw=True, source=source)
1933 self.hook('preoutgoing', throw=True, source=source)
1934
1934
1935 cl = self.changelog
1935 cl = self.changelog
1936 nodes = cl.findmissing(common)
1936 nodes = cl.findmissing(common)
1937 revset = set([cl.rev(n) for n in nodes])
1937 revset = set([cl.rev(n) for n in nodes])
1938 self.changegroupinfo(nodes, source)
1938 self.changegroupinfo(nodes, source)
1939
1939
1940 def identity(x):
1940 def identity(x):
1941 return x
1941 return x
1942
1942
1943 def gennodelst(log):
1943 def gennodelst(log):
1944 for r in log:
1944 for r in log:
1945 if log.linkrev(r) in revset:
1945 if log.linkrev(r) in revset:
1946 yield log.node(r)
1946 yield log.node(r)
1947
1947
1948 def changed_file_collector(changedfileset):
1948 def changed_file_collector(changedfileset):
1949 def collect_changed_files(clnode):
1949 def collect_changed_files(clnode):
1950 c = cl.read(clnode)
1950 c = cl.read(clnode)
1951 changedfileset.update(c[3])
1951 changedfileset.update(c[3])
1952 return collect_changed_files
1952 return collect_changed_files
1953
1953
1954 def lookuprevlink_func(revlog):
1954 def lookuprevlink_func(revlog):
1955 def lookuprevlink(n):
1955 def lookuprevlink(n):
1956 return cl.node(revlog.linkrev(revlog.rev(n)))
1956 return cl.node(revlog.linkrev(revlog.rev(n)))
1957 return lookuprevlink
1957 return lookuprevlink
1958
1958
1959 def gengroup():
1959 def gengroup():
1960 # construct a list of all changed files
1960 # construct a list of all changed files
1961 changedfiles = set()
1961 changedfiles = set()
1962
1962
1963 for chnk in cl.group(nodes, identity,
1963 for chnk in cl.group(nodes, identity,
1964 changed_file_collector(changedfiles)):
1964 changed_file_collector(changedfiles)):
1965 yield chnk
1965 yield chnk
1966
1966
1967 mnfst = self.manifest
1967 mnfst = self.manifest
1968 nodeiter = gennodelst(mnfst)
1968 nodeiter = gennodelst(mnfst)
1969 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1969 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1970 yield chnk
1970 yield chnk
1971
1971
1972 for fname in sorted(changedfiles):
1972 for fname in sorted(changedfiles):
1973 filerevlog = self.file(fname)
1973 filerevlog = self.file(fname)
1974 if not len(filerevlog):
1974 if not len(filerevlog):
1975 raise util.Abort(_("empty or missing revlog for %s") % fname)
1975 raise util.Abort(_("empty or missing revlog for %s") % fname)
1976 nodeiter = gennodelst(filerevlog)
1976 nodeiter = gennodelst(filerevlog)
1977 nodeiter = list(nodeiter)
1977 nodeiter = list(nodeiter)
1978 if nodeiter:
1978 if nodeiter:
1979 yield changegroup.chunkheader(len(fname))
1979 yield changegroup.chunkheader(len(fname))
1980 yield fname
1980 yield fname
1981 lookup = lookuprevlink_func(filerevlog)
1981 lookup = lookuprevlink_func(filerevlog)
1982 for chnk in filerevlog.group(nodeiter, lookup):
1982 for chnk in filerevlog.group(nodeiter, lookup):
1983 yield chnk
1983 yield chnk
1984
1984
1985 yield changegroup.closechunk()
1985 yield changegroup.closechunk()
1986
1986
1987 if nodes:
1987 if nodes:
1988 self.hook('outgoing', node=hex(nodes[0]), source=source)
1988 self.hook('outgoing', node=hex(nodes[0]), source=source)
1989
1989
1990 return util.chunkbuffer(gengroup())
1990 return util.chunkbuffer(gengroup())
1991
1991
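Both changegroup paths hand their generator to util.chunkbuffer(), which
presents it as a file-like object. A simplified stand-in showing the idea
(assumed behaviour, not the real implementation):

    class chunkbuffersketch(object):
        """File-like wrapper over an iterator of byte strings."""
        def __init__(self, gen):
            self.iter = iter(gen)
            self.buf = ""

        def read(self, n):
            # Pull chunks until we can hand back n bytes (or run dry).
            while len(self.buf) < n:
                try:
                    self.buf += self.iter.next()
                except StopIteration:
                    break
            data, self.buf = self.buf[:n], self.buf[n:]
            return data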
1992 def addchangegroup(self, source, srctype, url, emptyok=False):
1992 def addchangegroup(self, source, srctype, url, emptyok=False):
1993 """add changegroup to repo.
1993 """add changegroup to repo.
1994
1994
1995 return values:
1995 return values:
1996 - nothing changed or no source: 0
1996 - nothing changed or no source: 0
1997 - more heads than before: 1+added heads (2..n)
1997 - more heads than before: 1+added heads (2..n)
1998 - fewer heads than before: -1-removed heads (-2..-n)
1998 - fewer heads than before: -1-removed heads (-2..-n)
1999 - number of heads stays the same: 1
1999 - number of heads stays the same: 1
2000 """
2000 """
2001 def csmap(x):
2001 def csmap(x):
2002 self.ui.debug(_("add changeset %s\n") % short(x))
2002 self.ui.debug(_("add changeset %s\n") % short(x))
2003 return len(cl)
2003 return len(cl)
2004
2004
2005 def revmap(x):
2005 def revmap(x):
2006 return cl.rev(x)
2006 return cl.rev(x)
2007
2007
2008 if not source:
2008 if not source:
2009 return 0
2009 return 0
2010
2010
2011 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2011 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2012
2012
2013 changesets = files = revisions = 0
2013 changesets = files = revisions = 0
2014
2014
2015 # write changelog data to temp files so concurrent readers will not see
2015 # write changelog data to temp files so concurrent readers will not see
2016 # an inconsistent view
2016 # an inconsistent view
2017 cl = self.changelog
2017 cl = self.changelog
2018 cl.delayupdate()
2018 cl.delayupdate()
2019 oldheads = len(cl.heads())
2019 oldheads = len(cl.heads())
2020
2020
2021 tr = self.transaction()
2021 tr = self.transaction()
2022 try:
2022 try:
2023 trp = weakref.proxy(tr)
2023 trp = weakref.proxy(tr)
2024 # pull off the changeset group
2024 # pull off the changeset group
2025 self.ui.status(_("adding changesets\n"))
2025 self.ui.status(_("adding changesets\n"))
2026 clstart = len(cl)
2026 clstart = len(cl)
2027 chunkiter = changegroup.chunkiter(source)
2027 chunkiter = changegroup.chunkiter(source)
2028 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2028 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2029 raise util.Abort(_("received changelog group is empty"))
2029 raise util.Abort(_("received changelog group is empty"))
2030 clend = len(cl)
2030 clend = len(cl)
2031 changesets = clend - clstart
2031 changesets = clend - clstart
2032
2032
2033 # pull off the manifest group
2033 # pull off the manifest group
2034 self.ui.status(_("adding manifests\n"))
2034 self.ui.status(_("adding manifests\n"))
2035 chunkiter = changegroup.chunkiter(source)
2035 chunkiter = changegroup.chunkiter(source)
2036 # no need to check for empty manifest group here:
2036 # no need to check for empty manifest group here:
2037 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2037 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2038 # no new manifest will be created and the manifest group will
2038 # no new manifest will be created and the manifest group will
2039 # be empty during the pull
2039 # be empty during the pull
2040 self.manifest.addgroup(chunkiter, revmap, trp)
2040 self.manifest.addgroup(chunkiter, revmap, trp)
2041
2041
2042 # process the files
2042 # process the files
2043 self.ui.status(_("adding file changes\n"))
2043 self.ui.status(_("adding file changes\n"))
2044 while 1:
2044 while 1:
2045 f = changegroup.getchunk(source)
2045 f = changegroup.getchunk(source)
2046 if not f:
2046 if not f:
2047 break
2047 break
2048 self.ui.debug(_("adding %s revisions\n") % f)
2048 self.ui.debug(_("adding %s revisions\n") % f)
2049 fl = self.file(f)
2049 fl = self.file(f)
2050 o = len(fl)
2050 o = len(fl)
2051 chunkiter = changegroup.chunkiter(source)
2051 chunkiter = changegroup.chunkiter(source)
2052 if fl.addgroup(chunkiter, revmap, trp) is None:
2052 if fl.addgroup(chunkiter, revmap, trp) is None:
2053 raise util.Abort(_("received file revlog group is empty"))
2053 raise util.Abort(_("received file revlog group is empty"))
2054 revisions += len(fl) - o
2054 revisions += len(fl) - o
2055 files += 1
2055 files += 1
2056
2056
2057 newheads = len(cl.heads())
2057 newheads = len(cl.heads())
2058 heads = ""
2058 heads = ""
2059 if oldheads and newheads != oldheads:
2059 if oldheads and newheads != oldheads:
2060 heads = _(" (%+d heads)") % (newheads - oldheads)
2060 heads = _(" (%+d heads)") % (newheads - oldheads)
2061
2061
2062 self.ui.status(_("added %d changesets"
2062 self.ui.status(_("added %d changesets"
2063 " with %d changes to %d files%s\n")
2063 " with %d changes to %d files%s\n")
2064 % (changesets, revisions, files, heads))
2064 % (changesets, revisions, files, heads))
2065
2065
2066 if changesets > 0:
2066 if changesets > 0:
2067 p = lambda: cl.writepending() and self.root or ""
2067 p = lambda: cl.writepending() and self.root or ""
2068 self.hook('pretxnchangegroup', throw=True,
2068 self.hook('pretxnchangegroup', throw=True,
2069 node=hex(cl.node(clstart)), source=srctype,
2069 node=hex(cl.node(clstart)), source=srctype,
2070 url=url, pending=p)
2070 url=url, pending=p)
2071
2071
2072 # make changelog see real files again
2072 # make changelog see real files again
2073 cl.finalize(trp)
2073 cl.finalize(trp)
2074
2074
2075 tr.close()
2075 tr.close()
2076 finally:
2076 finally:
2077 del tr
2077 del tr
2078
2078
2079 if changesets > 0:
2079 if changesets > 0:
2080 # forcefully update the on-disk branch cache
2080 # forcefully update the on-disk branch cache
2081 self.ui.debug(_("updating the branch cache\n"))
2081 self.ui.debug(_("updating the branch cache\n"))
2082 self.branchtags()
2082 self.branchtags()
2083 self.hook("changegroup", node=hex(cl.node(clstart)),
2083 self.hook("changegroup", node=hex(cl.node(clstart)),
2084 source=srctype, url=url)
2084 source=srctype, url=url)
2085
2085
2086 for i in xrange(clstart, clend):
2086 for i in xrange(clstart, clend):
2087 self.hook("incoming", node=hex(cl.node(i)),
2087 self.hook("incoming", node=hex(cl.node(i)),
2088 source=srctype, url=url)
2088 source=srctype, url=url)
2089
2089
2090 # never return 0 here:
2090 # never return 0 here:
2091 if newheads < oldheads:
2091 if newheads < oldheads:
2092 return newheads - oldheads - 1
2092 return newheads - oldheads - 1
2093 else:
2093 else:
2094 return newheads - oldheads + 1
2094 return newheads - oldheads + 1
2095
2095
2096
2096
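A hypothetical helper (the name is illustrative, not part of this module)
showing how a caller might decode the return value documented above:

    def describe_addchangegroup(ret):
        if ret == 0:
            return "nothing changed or no source"
        if ret == 1:
            return "number of heads unchanged"
        if ret > 1:
            # 1 + added heads, so subtract the offset back out.
            return "%d head(s) added" % (ret - 1)
        # -1 - removed heads, i.e. ret <= -2.
        return "%d head(s) removed" % (-ret - 1)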
2097 def stream_in(self, remote):
2097 def stream_in(self, remote):
2098 fp = remote.stream_out()
2098 fp = remote.stream_out()
2099 l = fp.readline()
2099 l = fp.readline()
2100 try:
2100 try:
2101 resp = int(l)
2101 resp = int(l)
2102 except ValueError:
2102 except ValueError:
2103 raise error.ResponseError(
2103 raise error.ResponseError(
2104 _('Unexpected response from remote server:'), l)
2104 _('Unexpected response from remote server:'), l)
2105 if resp == 1:
2105 if resp == 1:
2106 raise util.Abort(_('operation forbidden by server'))
2106 raise util.Abort(_('operation forbidden by server'))
2107 elif resp == 2:
2107 elif resp == 2:
2108 raise util.Abort(_('locking the remote repository failed'))
2108 raise util.Abort(_('locking the remote repository failed'))
2109 elif resp != 0:
2109 elif resp != 0:
2110 raise util.Abort(_('the server sent an unknown error code'))
2110 raise util.Abort(_('the server sent an unknown error code'))
2111 self.ui.status(_('streaming all changes\n'))
2111 self.ui.status(_('streaming all changes\n'))
2112 l = fp.readline()
2112 l = fp.readline()
2113 try:
2113 try:
2114 total_files, total_bytes = map(int, l.split(' ', 1))
2114 total_files, total_bytes = map(int, l.split(' ', 1))
2115 except (ValueError, TypeError):
2115 except (ValueError, TypeError):
2116 raise error.ResponseError(
2116 raise error.ResponseError(
2117 _('Unexpected response from remote server:'), l)
2117 _('Unexpected response from remote server:'), l)
2118 self.ui.status(_('%d files to transfer, %s of data\n') %
2118 self.ui.status(_('%d files to transfer, %s of data\n') %
2119 (total_files, util.bytecount(total_bytes)))
2119 (total_files, util.bytecount(total_bytes)))
2120 start = time.time()
2120 start = time.time()
2121 for i in xrange(total_files):
2121 for i in xrange(total_files):
2122 # XXX doesn't support '\n' or '\r' in filenames
2122 # XXX doesn't support '\n' or '\r' in filenames
2123 l = fp.readline()
2123 l = fp.readline()
2124 try:
2124 try:
2125 name, size = l.split('\0', 1)
2125 name, size = l.split('\0', 1)
2126 size = int(size)
2126 size = int(size)
2127 except (ValueError, TypeError):
2127 except (ValueError, TypeError):
2128 raise error.ResponseError(
2128 raise error.ResponseError(
2129 _('Unexpected response from remote server:'), l)
2129 _('Unexpected response from remote server:'), l)
2130 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2130 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2131 # for backwards compat, name was partially encoded
2131 # for backwards compat, name was partially encoded
2132 ofp = self.sopener(store.decodedir(name), 'w')
2132 ofp = self.sopener(store.decodedir(name), 'w')
2133 for chunk in util.filechunkiter(fp, limit=size):
2133 for chunk in util.filechunkiter(fp, limit=size):
2134 ofp.write(chunk)
2134 ofp.write(chunk)
2135 ofp.close()
2135 ofp.close()
2136 elapsed = time.time() - start
2136 elapsed = time.time() - start
2137 if elapsed <= 0:
2137 if elapsed <= 0:
2138 elapsed = 0.001
2138 elapsed = 0.001
2139 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2139 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2140 (util.bytecount(total_bytes), elapsed,
2140 (util.bytecount(total_bytes), elapsed,
2141 util.bytecount(total_bytes / elapsed)))
2141 util.bytecount(total_bytes / elapsed)))
2142 self.invalidate()
2142 self.invalidate()
2143 return len(self.heads()) + 1
2143 return len(self.heads()) + 1
2144
2144
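stream_in() expects a small textual preamble before the raw file data: a
numeric status line, a "total_files total_bytes" line, then one
"name\0size" line per file followed by exactly size raw bytes. A minimal
sketch of the preamble parsing, under the same protocol assumptions:

    def parse_stream_preamble(fp):
        # First line: 0 = ok, 1 = operation forbidden, 2 = lock failed.
        resp = int(fp.readline())
        if resp != 0:
            raise ValueError("server refused stream clone (code %d)" % resp)
        # Second line: file count and total byte count, space-separated.
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        return total_files, total_bytes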
2145 def clone(self, remote, heads=[], stream=False):
2145 def clone(self, remote, heads=[], stream=False):
2146 '''clone remote repository.
2146 '''clone remote repository.
2147
2147
2148 keyword arguments:
2148 keyword arguments:
2149 heads: list of revs to clone (forces use of pull)
2149 heads: list of revs to clone (forces use of pull)
2150 stream: use streaming clone if possible'''
2150 stream: use streaming clone if possible'''
2151
2151
2152 # now, all clients that can request uncompressed clones can
2152 # now, all clients that can request uncompressed clones can
2153 # read repo formats supported by all servers that can serve
2153 # read repo formats supported by all servers that can serve
2154 # them.
2154 # them.
2155
2155
2156 # if revlog format changes, client will have to check version
2156 # if revlog format changes, client will have to check version
2157 # and format flags on "stream" capability, and use
2157 # and format flags on "stream" capability, and use
2158 # uncompressed only if compatible.
2158 # uncompressed only if compatible.
2159
2159
2160 if stream and not heads and remote.capable('stream'):
2160 if stream and not heads and remote.capable('stream'):
2161 return self.stream_in(remote)
2161 return self.stream_in(remote)
2162 return self.pull(remote, heads)
2162 return self.pull(remote, heads)
2163
2163
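In practice the two paths look like this (hypothetical caller; the
argument values are illustrative only):

    repo.clone(remote, stream=True)       # streams if the server allows it
    repo.clone(remote, heads=[somenode])  # explicit heads force a pull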
2164 # used to avoid circular references so destructors work
2164 # used to avoid circular references so destructors work
2165 def aftertrans(files):
2165 def aftertrans(files):
2166 renamefiles = [tuple(t) for t in files]
2166 renamefiles = [tuple(t) for t in files]
2167 def a():
2167 def a():
2168 for src, dest in renamefiles:
2168 for src, dest in renamefiles:
2169 util.rename(src, dest)
2169 util.rename(src, dest)
2170 return a
2170 return a
2171
2171
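aftertrans() exists so the rename callback captures only plain tuples and
no reference back to the repository, keeping destructors working. A hedged
example of its use (the file name pairs here are illustrative):

    renames = [("journal", "undo"), ("journal.branch", "undo.branch")]
    cb = aftertrans(renames)
    # Once the transaction completes, invoking the callback replays each
    # (src, dest) pair through util.rename:
    # cb()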
2172 def instance(ui, path, create):
2172 def instance(ui, path, create):
2173 return localrepository(ui, util.drop_scheme('file', path), create)
2173 return localrepository(ui, util.drop_scheme('file', path), create)
2174
2174
2175 def islocal(path):
2175 def islocal(path):
2176 return True
2176 return True
@@ -1,83 +1,81 b''
1 000000000000 tip
1 000000000000 tip
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 0acdaf898367 tip
3 0acdaf898367 tip
4 tip 0:0acdaf898367
4 tip 0:0acdaf898367
5 This is a local tag with a really long name! 0:0acdaf898367
5 This is a local tag with a really long name! 0:0acdaf898367
6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
7 tip 1:8a3ca90d111d
7 tip 1:8a3ca90d111d
8 first 0:0acdaf898367
8 first 0:0acdaf898367
9 8a3ca90d111d tip
9 8a3ca90d111d tip
10 M a
10 M a
11 8a3ca90d111d+ tip
11 8a3ca90d111d+ tip
12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
13 0acdaf898367+ first
13 0acdaf898367+ first
14 0acdaf898367+ first
14 0acdaf898367+ first
15 M a
15 M a
16 created new head
16 created new head
17 8216907a933d tip
17 8216907a933d tip
18 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
19 (branch merge, don't forget to commit)
19 (branch merge, don't forget to commit)
20 8216907a933d+8a3ca90d111d+ tip
20 8216907a933d+8a3ca90d111d+ tip
21 M .hgtags
21 M .hgtags
22 tip 6:e2174d339386
22 tip 6:e2174d339386
23 first 0:0acdaf898367
23 first 0:0acdaf898367
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
25 created new head
25 created new head
26 .hgtags@c071f74ab5eb, line 2: cannot parse entry
26 .hgtags@c071f74ab5eb, line 2: cannot parse entry
27 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
27 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
28 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
28 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
29 localtags, line 1: tag 'invalid' refers to unknown node
30 tip 8:4ca6f1b1a68c
29 tip 8:4ca6f1b1a68c
31 first 0:0acdaf898367
30 first 0:0acdaf898367
32 changeset: 8:4ca6f1b1a68c
31 changeset: 8:4ca6f1b1a68c
33 .hgtags@c071f74ab5eb, line 2: cannot parse entry
32 .hgtags@c071f74ab5eb, line 2: cannot parse entry
34 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
33 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
35 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
34 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
36 localtags, line 1: tag 'invalid' refers to unknown node
37 tag: tip
35 tag: tip
38 parent: 3:b2ef3841386b
36 parent: 3:b2ef3841386b
39 user: test
37 user: test
40 date: Mon Jan 12 13:46:40 1970 +0000
38 date: Mon Jan 12 13:46:40 1970 +0000
41 summary: head
39 summary: head
42
40
43 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
44 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
45 created new head
43 created new head
46 tip 4:36195b728445
44 tip 4:36195b728445
47 bar 1:b204a97e6e8d
45 bar 1:b204a97e6e8d
48 changeset: 5:1f98c77278de
46 changeset: 5:1f98c77278de
49 tag: tip
47 tag: tip
50 user: test
48 user: test
51 date: Mon Jan 12 13:46:40 1970 +0000
49 date: Mon Jan 12 13:46:40 1970 +0000
52 summary: Removed tag bar
50 summary: Removed tag bar
53
51
54 tip 5:1f98c77278de
52 tip 5:1f98c77278de
55 % remove nonexistent tag
53 % remove nonexistent tag
56 abort: tag 'foobar' does not exist
54 abort: tag 'foobar' does not exist
57 changeset: 5:1f98c77278de
55 changeset: 5:1f98c77278de
58 tag: tip
56 tag: tip
59 user: test
57 user: test
60 date: Mon Jan 12 13:46:40 1970 +0000
58 date: Mon Jan 12 13:46:40 1970 +0000
61 summary: Removed tag bar
59 summary: Removed tag bar
62
60
63 tip 5:e86d7ed95fd3
61 tip 5:e86d7ed95fd3
64 bar 0:b409d9da318e
62 bar 0:b409d9da318e
65 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
63 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
66 created new head
64 created new head
67 tip 6:b744fbe1f6dd
65 tip 6:b744fbe1f6dd
68 bar 0:b409d9da318e
66 bar 0:b409d9da318e
69 abort: tag 'bar' already exists (use -f to force)
67 abort: tag 'bar' already exists (use -f to force)
70 tip 6:b744fbe1f6dd
68 tip 6:b744fbe1f6dd
71 bar 0:b409d9da318e
69 bar 0:b409d9da318e
72 adding foo
70 adding foo
73 tip 3:197c21bbbf2c
71 tip 3:197c21bbbf2c
74 bar 2:6fa450212aeb
72 bar 2:6fa450212aeb
75 % bar should still point to rev 2
73 % bar should still point to rev 2
76 tip 4:3b4b14ed0202
74 tip 4:3b4b14ed0202
77 bar 2:6fa450212aeb
75 bar 2:6fa450212aeb
78 adding foo
76 adding foo
79 abort: tag 'localtag' is not a global tag
77 abort: tag 'localtag' is not a global tag
80 abort: tag 'globaltag' is not a local tag
78 abort: tag 'globaltag' is not a local tag
81 tip 1:a0b6fe111088
79 tip 1:a0b6fe111088
82 localtag 0:bbd179dfa0a7 local
80 localtag 0:bbd179dfa0a7 local
83 globaltag 0:bbd179dfa0a7
81 globaltag 0:bbd179dfa0a7