##// END OF EJS Templates
tag: force load of tag cache
Matt Mackall -
r7814:4421abf8 default
parent child Browse files
Show More
@@ -1,2162 +1,2163 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store
12 import lock, transaction, stat, errno, ui, store
13 import os, time, util, extensions, hook, inspect, error
13 import os, time, util, extensions, hook, inspect, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = util.set(('lookup', 'changegroupsubset'))
18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 supported = ('revlogv1', 'store', 'fncache')
19 supported = ('revlogv1', 'store', 'fncache')
20
20
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.root = os.path.realpath(path)
23 self.root = os.path.realpath(path)
24 self.path = os.path.join(self.root, ".hg")
24 self.path = os.path.join(self.root, ".hg")
25 self.origroot = path
25 self.origroot = path
26 self.opener = util.opener(self.path)
26 self.opener = util.opener(self.path)
27 self.wopener = util.opener(self.root)
27 self.wopener = util.opener(self.root)
28
28
29 if not os.path.isdir(self.path):
29 if not os.path.isdir(self.path):
30 if create:
30 if create:
31 if not os.path.exists(path):
31 if not os.path.exists(path):
32 os.mkdir(path)
32 os.mkdir(path)
33 os.mkdir(self.path)
33 os.mkdir(self.path)
34 requirements = ["revlogv1"]
34 requirements = ["revlogv1"]
35 if parentui.configbool('format', 'usestore', True):
35 if parentui.configbool('format', 'usestore', True):
36 os.mkdir(os.path.join(self.path, "store"))
36 os.mkdir(os.path.join(self.path, "store"))
37 requirements.append("store")
37 requirements.append("store")
38 if parentui.configbool('format', 'usefncache', True):
38 if parentui.configbool('format', 'usefncache', True):
39 requirements.append("fncache")
39 requirements.append("fncache")
40 # create an invalid changelog
40 # create an invalid changelog
41 self.opener("00changelog.i", "a").write(
41 self.opener("00changelog.i", "a").write(
42 '\0\0\0\2' # represents revlogv2
42 '\0\0\0\2' # represents revlogv2
43 ' dummy changelog to prevent using the old repo layout'
43 ' dummy changelog to prevent using the old repo layout'
44 )
44 )
45 reqfile = self.opener("requires", "w")
45 reqfile = self.opener("requires", "w")
46 for r in requirements:
46 for r in requirements:
47 reqfile.write("%s\n" % r)
47 reqfile.write("%s\n" % r)
48 reqfile.close()
48 reqfile.close()
49 else:
49 else:
50 raise error.RepoError(_("repository %s not found") % path)
50 raise error.RepoError(_("repository %s not found") % path)
51 elif create:
51 elif create:
52 raise error.RepoError(_("repository %s already exists") % path)
52 raise error.RepoError(_("repository %s already exists") % path)
53 else:
53 else:
54 # find requirements
54 # find requirements
55 requirements = []
55 requirements = []
56 try:
56 try:
57 requirements = self.opener("requires").read().splitlines()
57 requirements = self.opener("requires").read().splitlines()
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise error.RepoError(_("requirement '%s' not supported") % r)
60 raise error.RepoError(_("requirement '%s' not supported") % r)
61 except IOError, inst:
61 except IOError, inst:
62 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
63 raise
63 raise
64
64
65 self.store = store.store(requirements, self.path, util.opener)
65 self.store = store.store(requirements, self.path, util.opener)
66 self.spath = self.store.path
66 self.spath = self.store.path
67 self.sopener = self.store.opener
67 self.sopener = self.store.opener
68 self.sjoin = self.store.join
68 self.sjoin = self.store.join
69 self.opener.createmode = self.store.createmode
69 self.opener.createmode = self.store.createmode
70
70
71 self.ui = ui.ui(parentui=parentui)
71 self.ui = ui.ui(parentui=parentui)
72 try:
72 try:
73 self.ui.readconfig(self.join("hgrc"), self.root)
73 self.ui.readconfig(self.join("hgrc"), self.root)
74 extensions.loadall(self.ui)
74 extensions.loadall(self.ui)
75 except IOError:
75 except IOError:
76 pass
76 pass
77
77
78 self.tagscache = None
78 self.tagscache = None
79 self._tagstypecache = None
79 self._tagstypecache = None
80 self.branchcache = None
80 self.branchcache = None
81 self._ubranchcache = None # UTF-8 version of branchcache
81 self._ubranchcache = None # UTF-8 version of branchcache
82 self._branchcachetip = None
82 self._branchcachetip = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.filterpats = {}
84 self.filterpats = {}
85 self._datafilters = {}
85 self._datafilters = {}
86 self._transref = self._lockref = self._wlockref = None
86 self._transref = self._lockref = self._wlockref = None
87
87
def __getattr__(self, name):
    """Build the expensive changelog/manifest/dirstate objects lazily,
    caching each on the instance so this hook fires only once per name."""
    if name == 'changelog':
        self.changelog = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            pending = os.environ['HG_PENDING']
            if pending.startswith(self.root):
                self.changelog.readpending('00changelog.i.a')
        self.sopener.defversion = self.changelog.version
        return self.changelog
    if name == 'manifest':
        # force the changelog to load first so revlog versions agree
        self.changelog
        self.manifest = manifest.manifest(self.sopener)
        return self.manifest
    if name == 'dirstate':
        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        return self.dirstate
    else:
        raise AttributeError(name)
106
106
def __getitem__(self, changeid):
    """Return the context for changeid; None selects the working directory."""
    # identity check: 'changeid == None' would invoke __eq__ on arbitrary
    # changeid objects (PEP 8: compare to None with 'is')
    if changeid is None:
        return context.workingctx(self)
    return context.changectx(self, changeid)
111
111
def __nonzero__(self):
    """A repository object is always truthy, even when it has no revisions."""
    return True
114
114
def __len__(self):
    """Number of revisions in the repository."""
    return len(self.changelog)
117
117
def __iter__(self):
    """Iterate over all revision numbers, oldest first."""
    return iter(xrange(len(self)))
121
121
def url(self):
    """Return the location of this repository as a file: URL."""
    return 'file:' + self.root
124
124
def hook(self, name, throw=False, **args):
    """Run the named hook against this repo, forwarding extra keywords."""
    return hook.hook(self.ui, self, name, throw, **args)
127
127
# characters that may never appear inside a tag name
tag_disallowed = ':\r\n'
129
129
def _tag(self, names, node, message, local, user, date, parent=None,
         extra=None):
    '''Record one or more tags for node (helper shared by tag commands).

    names may be a single string or a sequence of strings.  When local is
    true the tags go to .hg/localtags; otherwise .hgtags is rewritten and
    a changeset committing the change is created (returned).  parent, when
    given, selects the parent revision of that commit instead of the
    dirstate parent.
    '''
    # use a None default rather than 'extra={}': a mutable default dict
    # would be shared between calls
    if extra is None:
        extra = {}
    use_dirstate = parent is None

    if isinstance(names, str):
        allchars = names
        names = (names,)
    else:
        allchars = ''.join(names)
    for c in self.tag_disallowed:
        if c in allchars:
            raise util.Abort(_('%r cannot be used in a tag name') % c)

    for name in names:
        self.hook('pretag', throw=True, node=hex(node), tag=name,
                  local=local)

    def writetags(fp, names, munge, prevtags):
        # append entries at EOF, making sure prior content ends with \n
        fp.seek(0, 2)
        if prevtags and prevtags[-1] != '\n':
            fp.write('\n')
        for name in names:
            m = munge and munge(name) or name
            if self._tagstypecache and name in self._tagstypecache:
                # record the old value first so history is preserved
                old = self.tagscache.get(name, nullid)
                fp.write('%s %s\n' % (hex(old), m))
            fp.write('%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = ''
    if local:
        try:
            fp = self.opener('localtags', 'r+')
        except IOError:
            fp = self.opener('localtags', 'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)
        return

    if use_dirstate:
        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()
    else:
        try:
            prevtags = self.filectx('.hgtags', parent).data()
        except error.LookupError:
            pass
        fp = self.wfile('.hgtags', 'wb')
        if prevtags:
            fp.write(prevtags)

    # committed tags are stored in UTF-8
    writetags(fp, names, util.fromlocal, prevtags)

    if use_dirstate and '.hgtags' not in self.dirstate:
        self.add(['.hgtags'])

    tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                          extra=extra)

    for name in names:
        self.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode
203
203
def tag(self, names, node, message, local, user, date):
    '''Apply one or more symbolic names to a revision.

    names may be a single string or a list of strings.  With local=True
    the tags are recorded in a per-repository file that is not version
    controlled; otherwise .hgtags is updated and a changeset committing
    the change is created, using message, user and date.  Aborts when the
    working copy of .hgtags has uncommitted changes.'''

    for changed in self.status()[:5]:
        if '.hgtags' in changed:
            raise util.Abort(_('working copy of .hgtags is changed '
                               '(please commit .hgtags manually)'))

    self.tags() # instantiate the cache
    self._tag(names, node, message, local, user, date)
231
232
def tags(self):
    '''Return the tag -> node mapping, building and caching it on demand.'''
    if self.tagscache:
        return self.tagscache

    globaltags = {}
    tagtypes = {}

    def readtags(lines, fn, tagtype):
        # parse one tags file and merge its entries into globaltags
        filetags = {}
        count = 0

        def warn(msg):
            self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

        for line in lines:
            count += 1
            if not line:
                continue
            parts = line.split(" ", 1)
            if len(parts) != 2:
                warn(_("cannot parse entry"))
                continue
            node, key = parts
            key = util.tolocal(key.strip()) # stored in UTF-8
            try:
                binnode = bin(node)
            except TypeError:
                warn(_("node '%s' is not well formed") % node)
                continue
            if binnode not in self.changelog.nodemap:
                warn(_("tag '%s' refers to unknown node") % key)
                continue

            # track the per-file history of each tag alongside its value
            history = []
            if key in filetags:
                n, history = filetags[key]
                history.append(n)
            filetags[key] = (binnode, history)

        for k, nh in filetags.iteritems():
            if k not in globaltags:
                globaltags[k] = nh
                tagtypes[k] = tagtype
                continue

            # we prefer the global tag if:
            #  it supercedes us OR
            #  mutual supercedes and it has a higher rank
            # otherwise we win because we're tip-most
            an, ah = nh
            bn, bh = globaltags[k]
            if (bn != an and an in bh and
                (bn not in ah or len(bh) > len(ah))):
                an = bn
            ah.extend([n for n in bh if n not in ah])
            globaltags[k] = an, ah
            tagtypes[k] = tagtype

    # read the tags file from each head, ending with the tip
    f = None
    for rev, node, fnode in self._hgtagsnodes():
        f = (f and f.filectx(fnode) or
             self.filectx('.hgtags', fileid=fnode))
        readtags(f.data().splitlines(), f, "global")

    try:
        # localtags are stored in the local character set
        # while the internal tag table is stored in UTF-8
        data = util.fromlocal(self.opener("localtags").read())
        readtags(data.splitlines(), "localtags", "local")
    except IOError:
        pass

    self.tagscache = {}
    self._tagstypecache = {}
    for k, nh in globaltags.iteritems():
        n = nh[0]
        if n != nullid:
            self.tagscache[k] = n
        self._tagstypecache[k] = tagtypes[k]
    # 'tip' always exists and always points at the newest revision
    self.tagscache['tip'] = self.changelog.tip()
    return self.tagscache
315
316
def tagtype(self, tagname):
    '''Classify tagname.

    Returns 'local' for a local tag, 'global' for a global tag, or None
    when the tag does not exist.'''
    # calling tags() populates _tagstypecache as a side effect
    self.tags()
    return self._tagstypecache.get(tagname)
328
329
def _hgtagsnodes(self):
    """List (rev, node, fnode) for the .hgtags file of every head.

    Heads are visited oldest-first; when several heads share the same
    .hgtags filenode only the newest occurrence is kept."""
    heads = self.heads()
    heads.reverse()
    seen = {}
    entries = []
    for node in heads:
        ctx = self[node]
        rev = ctx.rev()
        try:
            fnode = ctx.filenode('.hgtags')
        except error.LookupError:
            # this head has no .hgtags file
            continue
        entries.append((rev, node, fnode))
        if fnode in seen:
            # drop the older duplicate of this filenode
            entries[seen[fnode]] = None
        seen[fnode] = len(entries) - 1
    return [item for item in entries if item]
346
347
def tagslist(self):
    '''Return a list of (tag, node) pairs ordered by revision.'''
    l = []
    for t, n in self.tags().iteritems():
        try:
            r = self.changelog.rev(n)
        except Exception:
            # narrow the original bare 'except:' so Ctrl-C still works;
            # unknown nodes sort to the beginning of the list
            r = -2
        l.append((r, t, n))
    return [(t, n) for r, t, n in util.sort(l)]
357
358
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self.nodetagscache:
        # build the reverse node -> [tags] index once and cache it
        self.nodetagscache = {}
        for t, n in self.tags().iteritems():
            self.nodetagscache.setdefault(n, []).append(t)
    return self.nodetagscache.get(node, [])
365
366
def _branchtags(self, partial, lrev):
    # TODO: rename this function?
    """Extend the branch-head map partial (valid through lrev) up to the
    current tip and persist it when anything new was scanned."""
    tiprev = len(self) - 1
    if lrev != tiprev:
        self._updatebranchcache(partial, lrev + 1, tiprev + 1)
        self._writebranchcache(partial, self.changelog.tip(), tiprev)
    return partial
374
375
def _branchheads(self):
    """Return the {branchname: [head nodes]} map in the local charset,
    rebuilding the cached copy whenever the repository tip has moved."""
    tip = self.changelog.tip()
    if self.branchcache is not None and self._branchcachetip == tip:
        return self.branchcache

    oldtip = self._branchcachetip
    self._branchcachetip = tip
    if self.branchcache is None:
        self.branchcache = {} # avoid recursion in changectx
    else:
        self.branchcache.clear() # keep using the same dict
    if oldtip is None or oldtip not in self.changelog.nodemap:
        # no usable previous state (e.g. after a strip): reload from disk
        partial, last, lrev = self._readbranchcache()
    else:
        lrev = self.changelog.rev(oldtip)
        partial = self._ubranchcache

    self._branchtags(partial, lrev)
    # this private cache holds all heads (not just tips)
    self._ubranchcache = partial

    # the branch cache is stored on disk as UTF-8, but in the local
    # charset internally
    for k, v in partial.iteritems():
        self.branchcache[util.tolocal(k)] = v
    return self.branchcache
401
402
402
403
def branchtags(self):
    '''return a dict where branch names map to the tipmost head of
    the branch, open heads come before closed'''
    bt = {}
    for bn, heads in self._branchheads().iteritems():
        head = None
        # walk heads from tip-most downwards, picking the first open one
        # (reversed() replaces the manual range(len-1, -1, -1) loop)
        for h in reversed(heads):
            if 'close' not in self.changelog.read(h)[5]:
                head = h
                break
        if head is None:
            # no open heads were found: fall back to the tip-most head
            head = heads[-1]
        bt[bn] = head
    return bt
419
420
420
421
421 def _readbranchcache(self):
422 def _readbranchcache(self):
422 partial = {}
423 partial = {}
423 try:
424 try:
424 f = self.opener("branchheads.cache")
425 f = self.opener("branchheads.cache")
425 lines = f.read().split('\n')
426 lines = f.read().split('\n')
426 f.close()
427 f.close()
427 except (IOError, OSError):
428 except (IOError, OSError):
428 return {}, nullid, nullrev
429 return {}, nullid, nullrev
429
430
430 try:
431 try:
431 last, lrev = lines.pop(0).split(" ", 1)
432 last, lrev = lines.pop(0).split(" ", 1)
432 last, lrev = bin(last), int(lrev)
433 last, lrev = bin(last), int(lrev)
433 if lrev >= len(self) or self[lrev].node() != last:
434 if lrev >= len(self) or self[lrev].node() != last:
434 # invalidate the cache
435 # invalidate the cache
435 raise ValueError('invalidating branch cache (tip differs)')
436 raise ValueError('invalidating branch cache (tip differs)')
436 for l in lines:
437 for l in lines:
437 if not l: continue
438 if not l: continue
438 node, label = l.split(" ", 1)
439 node, label = l.split(" ", 1)
439 partial.setdefault(label.strip(), []).append(bin(node))
440 partial.setdefault(label.strip(), []).append(bin(node))
440 except KeyboardInterrupt:
441 except KeyboardInterrupt:
441 raise
442 raise
442 except Exception, inst:
443 except Exception, inst:
443 if self.ui.debugflag:
444 if self.ui.debugflag:
444 self.ui.warn(str(inst), '\n')
445 self.ui.warn(str(inst), '\n')
445 partial, last, lrev = {}, nullid, nullrev
446 partial, last, lrev = {}, nullid, nullrev
446 return partial, last, lrev
447 return partial, last, lrev
447
448
def _writebranchcache(self, branches, tip, tiprev):
    """Persist the branch-head map to branchheads.cache atomically.

    Best effort: I/O errors are deliberately swallowed, since the cache
    can always be rebuilt."""
    try:
        f = self.opener("branchheads.cache", "w", atomictemp=True)
        f.write("%s %s\n" % (hex(tip), tiprev))
        for label, nodes in branches.iteritems():
            for node in nodes:
                f.write("%s %s\n" % (hex(node), label))
        f.rename()
    except (IOError, OSError):
        pass
458
459
def _updatebranchcache(self, partial, start, end):
    """Fold revisions [start, end) into the branch-head map partial:
    each new changeset becomes a head and its parents cease to be."""
    for rev in xrange(start, end):
        ctx = self[rev]
        bheads = partial.setdefault(ctx.branch(), [])
        bheads.append(ctx.node())
        for parent in ctx.parents():
            pnode = parent.node()
            if pnode in bheads:
                bheads.remove(pnode)
469
470
def lookup(self, key):
    """Resolve key to a binary changelog node.

    key may be a revision number, '.', 'null', 'tip', a full or partial
    hex node, a tag or a branch name.  Raises error.RepoError when
    nothing matches."""
    if isinstance(key, int):
        return self.changelog.node(key)
    elif key == '.':
        return self.dirstate.parents()[0]
    elif key == 'null':
        return nullid
    elif key == 'tip':
        return self.changelog.tip()
    n = self.changelog._match(key)
    if n:
        return n
    if key in self.tags():
        return self.tags()[key]
    if key in self.branchtags():
        return self.branchtags()[key]
    n = self.changelog._partialmatch(key)
    if n:
        return n
    try:
        # show binary nodes as hex in the error message; narrowed the
        # original bare 'except:' to the only error len() can raise here
        if len(key) == 20:
            key = hex(key)
    except TypeError:
        pass
    raise error.RepoError(_("unknown revision '%s'") % key)
495
496
def local(self):
    """This is a local (on-disk) repository, not a remote proxy."""
    return True
498
499
def join(self, f):
    """Join f onto the .hg directory path."""
    return os.path.join(self.path, f)
501
502
def wjoin(self, f):
    """Join f onto the working directory root."""
    return os.path.join(self.root, f)
504
505
def rjoin(self, f):
    """Join a repository-style ('/'-separated) path onto the root."""
    return os.path.join(self.root, util.pconvert(f))
507
508
def file(self, f):
    """Return the filelog for tracked file f; a leading '/' is stripped.

    Uses startswith() rather than f[0] so an empty name cannot raise
    IndexError here."""
    if f.startswith('/'):
        f = f[1:]
    return filelog.filelog(self.sopener, f)
512
513
def changectx(self, changeid):
    """Return the change context for changeid (kept for compatibility;
    equivalent to repo[changeid])."""
    return self[changeid]
515
516
def parents(self, changeid=None):
    '''get list of changectxs for parents of changeid'''
    return self[changeid].parents()
519
520
def filectx(self, path, changeid=None, fileid=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    return context.filectx(self, path, changeid, fileid)
524
525
def getcwd(self):
    """Return the current working directory as seen by the dirstate."""
    return self.dirstate.getcwd()
527
528
def pathto(self, f, cwd=None):
    """Delegate to the dirstate: format f relative to cwd."""
    return self.dirstate.pathto(f, cwd)
530
531
def wfile(self, f, mode='r'):
    """Open file f from the working directory with the given mode."""
    return self.wopener(f, mode)
533
534
def _link(self, f):
    """True when f is a symlink in the working directory."""
    return os.path.islink(self.wjoin(f))
536
537
def _filter(self, filter, filename, data):
    """Run data through the first matching filter of a config section.

    filter names an hgrc section ('encode' or 'decode') mapping file
    patterns to commands or registered data filters; the compiled
    pattern list is cached per section in self.filterpats."""
    if filter not in self.filterpats:
        l = []
        for pat, cmd in self.ui.configitems(filter):
            if cmd == '!':
                continue
            mf = util.matcher(self.root, "", [pat], [], [])[1]
            fn = None
            params = cmd
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments.
            # Bind the wrapped function through a default argument:
            # closing over a loop-local ('oldfn = fn') would be rebound
            # on the next iteration, making every earlier wrapper call
            # the last filter (late-binding closure bug).
            if not inspect.getargspec(fn)[2]:
                fn = lambda s, c, oldfn=fn, **kwargs: oldfn(s, c)
            l.append((mf, fn, params))
        self.filterpats[filter] = l

    for mf, fn, cmd in self.filterpats[filter]:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
            break

    return data
567
568
568 def adddatafilter(self, name, filter):
569 def adddatafilter(self, name, filter):
569 self._datafilters[name] = filter
570 self._datafilters[name] = filter
570
571
571 def wread(self, filename):
572 def wread(self, filename):
572 if self._link(filename):
573 if self._link(filename):
573 data = os.readlink(self.wjoin(filename))
574 data = os.readlink(self.wjoin(filename))
574 else:
575 else:
575 data = self.wopener(filename, 'r').read()
576 data = self.wopener(filename, 'r').read()
576 return self._filter("encode", filename, data)
577 return self._filter("encode", filename, data)
577
578
578 def wwrite(self, filename, data, flags):
579 def wwrite(self, filename, data, flags):
579 data = self._filter("decode", filename, data)
580 data = self._filter("decode", filename, data)
580 try:
581 try:
581 os.unlink(self.wjoin(filename))
582 os.unlink(self.wjoin(filename))
582 except OSError:
583 except OSError:
583 pass
584 pass
584 if 'l' in flags:
585 if 'l' in flags:
585 self.wopener.symlink(data, filename)
586 self.wopener.symlink(data, filename)
586 else:
587 else:
587 self.wopener(filename, 'w').write(data)
588 self.wopener(filename, 'w').write(data)
588 if 'x' in flags:
589 if 'x' in flags:
589 util.set_flags(self.wjoin(filename), False, True)
590 util.set_flags(self.wjoin(filename), False, True)
590
591
591 def wwritedata(self, filename, data):
592 def wwritedata(self, filename, data):
592 return self._filter("decode", filename, data)
593 return self._filter("decode", filename, data)
593
594
594 def transaction(self):
595 def transaction(self):
595 if self._transref and self._transref():
596 if self._transref and self._transref():
596 return self._transref().nest()
597 return self._transref().nest()
597
598
598 # abort here if the journal already exists
599 # abort here if the journal already exists
599 if os.path.exists(self.sjoin("journal")):
600 if os.path.exists(self.sjoin("journal")):
600 raise error.RepoError(_("journal already exists - run hg recover"))
601 raise error.RepoError(_("journal already exists - run hg recover"))
601
602
602 # save dirstate for rollback
603 # save dirstate for rollback
603 try:
604 try:
604 ds = self.opener("dirstate").read()
605 ds = self.opener("dirstate").read()
605 except IOError:
606 except IOError:
606 ds = ""
607 ds = ""
607 self.opener("journal.dirstate", "w").write(ds)
608 self.opener("journal.dirstate", "w").write(ds)
608 self.opener("journal.branch", "w").write(self.dirstate.branch())
609 self.opener("journal.branch", "w").write(self.dirstate.branch())
609
610
610 renames = [(self.sjoin("journal"), self.sjoin("undo")),
611 renames = [(self.sjoin("journal"), self.sjoin("undo")),
611 (self.join("journal.dirstate"), self.join("undo.dirstate")),
612 (self.join("journal.dirstate"), self.join("undo.dirstate")),
612 (self.join("journal.branch"), self.join("undo.branch"))]
613 (self.join("journal.branch"), self.join("undo.branch"))]
613 tr = transaction.transaction(self.ui.warn, self.sopener,
614 tr = transaction.transaction(self.ui.warn, self.sopener,
614 self.sjoin("journal"),
615 self.sjoin("journal"),
615 aftertrans(renames),
616 aftertrans(renames),
616 self.store.createmode)
617 self.store.createmode)
617 self._transref = weakref.ref(tr)
618 self._transref = weakref.ref(tr)
618 return tr
619 return tr
619
620
620 def recover(self):
621 def recover(self):
621 l = self.lock()
622 l = self.lock()
622 try:
623 try:
623 if os.path.exists(self.sjoin("journal")):
624 if os.path.exists(self.sjoin("journal")):
624 self.ui.status(_("rolling back interrupted transaction\n"))
625 self.ui.status(_("rolling back interrupted transaction\n"))
625 transaction.rollback(self.sopener, self.sjoin("journal"))
626 transaction.rollback(self.sopener, self.sjoin("journal"))
626 self.invalidate()
627 self.invalidate()
627 return True
628 return True
628 else:
629 else:
629 self.ui.warn(_("no interrupted transaction available\n"))
630 self.ui.warn(_("no interrupted transaction available\n"))
630 return False
631 return False
631 finally:
632 finally:
632 del l
633 del l
633
634
634 def rollback(self):
635 def rollback(self):
635 wlock = lock = None
636 wlock = lock = None
636 try:
637 try:
637 wlock = self.wlock()
638 wlock = self.wlock()
638 lock = self.lock()
639 lock = self.lock()
639 if os.path.exists(self.sjoin("undo")):
640 if os.path.exists(self.sjoin("undo")):
640 self.ui.status(_("rolling back last transaction\n"))
641 self.ui.status(_("rolling back last transaction\n"))
641 transaction.rollback(self.sopener, self.sjoin("undo"))
642 transaction.rollback(self.sopener, self.sjoin("undo"))
642 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
643 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
643 try:
644 try:
644 branch = self.opener("undo.branch").read()
645 branch = self.opener("undo.branch").read()
645 self.dirstate.setbranch(branch)
646 self.dirstate.setbranch(branch)
646 except IOError:
647 except IOError:
647 self.ui.warn(_("Named branch could not be reset, "
648 self.ui.warn(_("Named branch could not be reset, "
648 "current branch still is: %s\n")
649 "current branch still is: %s\n")
649 % util.tolocal(self.dirstate.branch()))
650 % util.tolocal(self.dirstate.branch()))
650 self.invalidate()
651 self.invalidate()
651 self.dirstate.invalidate()
652 self.dirstate.invalidate()
652 else:
653 else:
653 self.ui.warn(_("no rollback information available\n"))
654 self.ui.warn(_("no rollback information available\n"))
654 finally:
655 finally:
655 del lock, wlock
656 del lock, wlock
656
657
657 def invalidate(self):
658 def invalidate(self):
658 for a in "changelog manifest".split():
659 for a in "changelog manifest".split():
659 if a in self.__dict__:
660 if a in self.__dict__:
660 delattr(self, a)
661 delattr(self, a)
661 self.tagscache = None
662 self.tagscache = None
662 self._tagstypecache = None
663 self._tagstypecache = None
663 self.nodetagscache = None
664 self.nodetagscache = None
664 self.branchcache = None
665 self.branchcache = None
665 self._ubranchcache = None
666 self._ubranchcache = None
666 self._branchcachetip = None
667 self._branchcachetip = None
667
668
668 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
669 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
669 try:
670 try:
670 l = lock.lock(lockname, 0, releasefn, desc=desc)
671 l = lock.lock(lockname, 0, releasefn, desc=desc)
671 except error.LockHeld, inst:
672 except error.LockHeld, inst:
672 if not wait:
673 if not wait:
673 raise
674 raise
674 self.ui.warn(_("waiting for lock on %s held by %r\n") %
675 self.ui.warn(_("waiting for lock on %s held by %r\n") %
675 (desc, inst.locker))
676 (desc, inst.locker))
676 # default to 600 seconds timeout
677 # default to 600 seconds timeout
677 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
678 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
678 releasefn, desc=desc)
679 releasefn, desc=desc)
679 if acquirefn:
680 if acquirefn:
680 acquirefn()
681 acquirefn()
681 return l
682 return l
682
683
683 def lock(self, wait=True):
684 def lock(self, wait=True):
684 if self._lockref and self._lockref():
685 if self._lockref and self._lockref():
685 return self._lockref()
686 return self._lockref()
686
687
687 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
688 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
688 _('repository %s') % self.origroot)
689 _('repository %s') % self.origroot)
689 self._lockref = weakref.ref(l)
690 self._lockref = weakref.ref(l)
690 return l
691 return l
691
692
692 def wlock(self, wait=True):
693 def wlock(self, wait=True):
693 if self._wlockref and self._wlockref():
694 if self._wlockref and self._wlockref():
694 return self._wlockref()
695 return self._wlockref()
695
696
696 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
697 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
697 self.dirstate.invalidate, _('working directory of %s') %
698 self.dirstate.invalidate, _('working directory of %s') %
698 self.origroot)
699 self.origroot)
699 self._wlockref = weakref.ref(l)
700 self._wlockref = weakref.ref(l)
700 return l
701 return l
701
702
702 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
703 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
703 """
704 """
704 commit an individual file as part of a larger transaction
705 commit an individual file as part of a larger transaction
705 """
706 """
706
707
707 fn = fctx.path()
708 fn = fctx.path()
708 t = fctx.data()
709 t = fctx.data()
709 fl = self.file(fn)
710 fl = self.file(fn)
710 fp1 = manifest1.get(fn, nullid)
711 fp1 = manifest1.get(fn, nullid)
711 fp2 = manifest2.get(fn, nullid)
712 fp2 = manifest2.get(fn, nullid)
712
713
713 meta = {}
714 meta = {}
714 cp = fctx.renamed()
715 cp = fctx.renamed()
715 if cp and cp[0] != fn:
716 if cp and cp[0] != fn:
716 # Mark the new revision of this file as a copy of another
717 # Mark the new revision of this file as a copy of another
717 # file. This copy data will effectively act as a parent
718 # file. This copy data will effectively act as a parent
718 # of this new revision. If this is a merge, the first
719 # of this new revision. If this is a merge, the first
719 # parent will be the nullid (meaning "look up the copy data")
720 # parent will be the nullid (meaning "look up the copy data")
720 # and the second one will be the other parent. For example:
721 # and the second one will be the other parent. For example:
721 #
722 #
722 # 0 --- 1 --- 3 rev1 changes file foo
723 # 0 --- 1 --- 3 rev1 changes file foo
723 # \ / rev2 renames foo to bar and changes it
724 # \ / rev2 renames foo to bar and changes it
724 # \- 2 -/ rev3 should have bar with all changes and
725 # \- 2 -/ rev3 should have bar with all changes and
725 # should record that bar descends from
726 # should record that bar descends from
726 # bar in rev2 and foo in rev1
727 # bar in rev2 and foo in rev1
727 #
728 #
728 # this allows this merge to succeed:
729 # this allows this merge to succeed:
729 #
730 #
730 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
731 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
731 # \ / merging rev3 and rev4 should use bar@rev2
732 # \ / merging rev3 and rev4 should use bar@rev2
732 # \- 2 --- 4 as the merge base
733 # \- 2 --- 4 as the merge base
733 #
734 #
734
735
735 cf = cp[0]
736 cf = cp[0]
736 cr = manifest1.get(cf)
737 cr = manifest1.get(cf)
737 nfp = fp2
738 nfp = fp2
738
739
739 if manifest2: # branch merge
740 if manifest2: # branch merge
740 if fp2 == nullid or cr is None: # copied on remote side
741 if fp2 == nullid or cr is None: # copied on remote side
741 if cf in manifest2:
742 if cf in manifest2:
742 cr = manifest2[cf]
743 cr = manifest2[cf]
743 nfp = fp1
744 nfp = fp1
744
745
745 # find source in nearest ancestor if we've lost track
746 # find source in nearest ancestor if we've lost track
746 if not cr:
747 if not cr:
747 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
748 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
748 (fn, cf))
749 (fn, cf))
749 for a in self['.'].ancestors():
750 for a in self['.'].ancestors():
750 if cf in a:
751 if cf in a:
751 cr = a[cf].filenode()
752 cr = a[cf].filenode()
752 break
753 break
753
754
754 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
755 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
755 meta["copy"] = cf
756 meta["copy"] = cf
756 meta["copyrev"] = hex(cr)
757 meta["copyrev"] = hex(cr)
757 fp1, fp2 = nullid, nfp
758 fp1, fp2 = nullid, nfp
758 elif fp2 != nullid:
759 elif fp2 != nullid:
759 # is one parent an ancestor of the other?
760 # is one parent an ancestor of the other?
760 fpa = fl.ancestor(fp1, fp2)
761 fpa = fl.ancestor(fp1, fp2)
761 if fpa == fp1:
762 if fpa == fp1:
762 fp1, fp2 = fp2, nullid
763 fp1, fp2 = fp2, nullid
763 elif fpa == fp2:
764 elif fpa == fp2:
764 fp2 = nullid
765 fp2 = nullid
765
766
766 # is the file unmodified from the parent? report existing entry
767 # is the file unmodified from the parent? report existing entry
767 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
768 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
768 return fp1
769 return fp1
769
770
770 changelist.append(fn)
771 changelist.append(fn)
771 return fl.add(t, meta, tr, linkrev, fp1, fp2)
772 return fl.add(t, meta, tr, linkrev, fp1, fp2)
772
773
773 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
774 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
774 if p1 is None:
775 if p1 is None:
775 p1, p2 = self.dirstate.parents()
776 p1, p2 = self.dirstate.parents()
776 return self.commit(files=files, text=text, user=user, date=date,
777 return self.commit(files=files, text=text, user=user, date=date,
777 p1=p1, p2=p2, extra=extra, empty_ok=True)
778 p1=p1, p2=p2, extra=extra, empty_ok=True)
778
779
779 def commit(self, files=None, text="", user=None, date=None,
780 def commit(self, files=None, text="", user=None, date=None,
780 match=None, force=False, force_editor=False,
781 match=None, force=False, force_editor=False,
781 p1=None, p2=None, extra={}, empty_ok=False):
782 p1=None, p2=None, extra={}, empty_ok=False):
782 wlock = lock = None
783 wlock = lock = None
783 if extra.get("close"):
784 if extra.get("close"):
784 force = True
785 force = True
785 if files:
786 if files:
786 files = util.unique(files)
787 files = util.unique(files)
787 try:
788 try:
788 wlock = self.wlock()
789 wlock = self.wlock()
789 lock = self.lock()
790 lock = self.lock()
790 use_dirstate = (p1 is None) # not rawcommit
791 use_dirstate = (p1 is None) # not rawcommit
791
792
792 if use_dirstate:
793 if use_dirstate:
793 p1, p2 = self.dirstate.parents()
794 p1, p2 = self.dirstate.parents()
794 update_dirstate = True
795 update_dirstate = True
795
796
796 if (not force and p2 != nullid and
797 if (not force and p2 != nullid and
797 (match and (match.files() or match.anypats()))):
798 (match and (match.files() or match.anypats()))):
798 raise util.Abort(_('cannot partially commit a merge '
799 raise util.Abort(_('cannot partially commit a merge '
799 '(do not specify files or patterns)'))
800 '(do not specify files or patterns)'))
800
801
801 if files:
802 if files:
802 modified, removed = [], []
803 modified, removed = [], []
803 for f in files:
804 for f in files:
804 s = self.dirstate[f]
805 s = self.dirstate[f]
805 if s in 'nma':
806 if s in 'nma':
806 modified.append(f)
807 modified.append(f)
807 elif s == 'r':
808 elif s == 'r':
808 removed.append(f)
809 removed.append(f)
809 else:
810 else:
810 self.ui.warn(_("%s not tracked!\n") % f)
811 self.ui.warn(_("%s not tracked!\n") % f)
811 changes = [modified, [], removed, [], []]
812 changes = [modified, [], removed, [], []]
812 else:
813 else:
813 changes = self.status(match=match)
814 changes = self.status(match=match)
814 else:
815 else:
815 p1, p2 = p1, p2 or nullid
816 p1, p2 = p1, p2 or nullid
816 update_dirstate = (self.dirstate.parents()[0] == p1)
817 update_dirstate = (self.dirstate.parents()[0] == p1)
817 changes = [files, [], [], [], []]
818 changes = [files, [], [], [], []]
818
819
819 ms = merge_.mergestate(self)
820 ms = merge_.mergestate(self)
820 for f in changes[0]:
821 for f in changes[0]:
821 if f in ms and ms[f] == 'u':
822 if f in ms and ms[f] == 'u':
822 raise util.Abort(_("unresolved merge conflicts "
823 raise util.Abort(_("unresolved merge conflicts "
823 "(see hg resolve)"))
824 "(see hg resolve)"))
824 wctx = context.workingctx(self, (p1, p2), text, user, date,
825 wctx = context.workingctx(self, (p1, p2), text, user, date,
825 extra, changes)
826 extra, changes)
826 return self._commitctx(wctx, force, force_editor, empty_ok,
827 return self._commitctx(wctx, force, force_editor, empty_ok,
827 use_dirstate, update_dirstate)
828 use_dirstate, update_dirstate)
828 finally:
829 finally:
829 del lock, wlock
830 del lock, wlock
830
831
831 def commitctx(self, ctx):
832 def commitctx(self, ctx):
832 """Add a new revision to current repository.
833 """Add a new revision to current repository.
833
834
834 Revision information is passed in the context.memctx argument.
835 Revision information is passed in the context.memctx argument.
835 commitctx() does not touch the working directory.
836 commitctx() does not touch the working directory.
836 """
837 """
837 wlock = lock = None
838 wlock = lock = None
838 try:
839 try:
839 wlock = self.wlock()
840 wlock = self.wlock()
840 lock = self.lock()
841 lock = self.lock()
841 return self._commitctx(ctx, force=True, force_editor=False,
842 return self._commitctx(ctx, force=True, force_editor=False,
842 empty_ok=True, use_dirstate=False,
843 empty_ok=True, use_dirstate=False,
843 update_dirstate=False)
844 update_dirstate=False)
844 finally:
845 finally:
845 del lock, wlock
846 del lock, wlock
846
847
847 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
848 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
848 use_dirstate=True, update_dirstate=True):
849 use_dirstate=True, update_dirstate=True):
849 tr = None
850 tr = None
850 valid = 0 # don't save the dirstate if this isn't set
851 valid = 0 # don't save the dirstate if this isn't set
851 try:
852 try:
852 commit = util.sort(wctx.modified() + wctx.added())
853 commit = util.sort(wctx.modified() + wctx.added())
853 remove = wctx.removed()
854 remove = wctx.removed()
854 extra = wctx.extra().copy()
855 extra = wctx.extra().copy()
855 branchname = extra['branch']
856 branchname = extra['branch']
856 user = wctx.user()
857 user = wctx.user()
857 text = wctx.description()
858 text = wctx.description()
858
859
859 p1, p2 = [p.node() for p in wctx.parents()]
860 p1, p2 = [p.node() for p in wctx.parents()]
860 c1 = self.changelog.read(p1)
861 c1 = self.changelog.read(p1)
861 c2 = self.changelog.read(p2)
862 c2 = self.changelog.read(p2)
862 m1 = self.manifest.read(c1[0]).copy()
863 m1 = self.manifest.read(c1[0]).copy()
863 m2 = self.manifest.read(c2[0])
864 m2 = self.manifest.read(c2[0])
864
865
865 if use_dirstate:
866 if use_dirstate:
866 oldname = c1[5].get("branch") # stored in UTF-8
867 oldname = c1[5].get("branch") # stored in UTF-8
867 if (not commit and not remove and not force and p2 == nullid
868 if (not commit and not remove and not force and p2 == nullid
868 and branchname == oldname):
869 and branchname == oldname):
869 self.ui.status(_("nothing changed\n"))
870 self.ui.status(_("nothing changed\n"))
870 return None
871 return None
871
872
872 xp1 = hex(p1)
873 xp1 = hex(p1)
873 if p2 == nullid: xp2 = ''
874 if p2 == nullid: xp2 = ''
874 else: xp2 = hex(p2)
875 else: xp2 = hex(p2)
875
876
876 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
877 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
877
878
878 tr = self.transaction()
879 tr = self.transaction()
879 trp = weakref.proxy(tr)
880 trp = weakref.proxy(tr)
880
881
881 # check in files
882 # check in files
882 new = {}
883 new = {}
883 changed = []
884 changed = []
884 linkrev = len(self)
885 linkrev = len(self)
885 for f in commit:
886 for f in commit:
886 self.ui.note(f + "\n")
887 self.ui.note(f + "\n")
887 try:
888 try:
888 fctx = wctx.filectx(f)
889 fctx = wctx.filectx(f)
889 newflags = fctx.flags()
890 newflags = fctx.flags()
890 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
891 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
891 if ((not changed or changed[-1] != f) and
892 if ((not changed or changed[-1] != f) and
892 m2.get(f) != new[f]):
893 m2.get(f) != new[f]):
893 # mention the file in the changelog if some
894 # mention the file in the changelog if some
894 # flag changed, even if there was no content
895 # flag changed, even if there was no content
895 # change.
896 # change.
896 if m1.flags(f) != newflags:
897 if m1.flags(f) != newflags:
897 changed.append(f)
898 changed.append(f)
898 m1.set(f, newflags)
899 m1.set(f, newflags)
899 if use_dirstate:
900 if use_dirstate:
900 self.dirstate.normal(f)
901 self.dirstate.normal(f)
901
902
902 except (OSError, IOError):
903 except (OSError, IOError):
903 if use_dirstate:
904 if use_dirstate:
904 self.ui.warn(_("trouble committing %s!\n") % f)
905 self.ui.warn(_("trouble committing %s!\n") % f)
905 raise
906 raise
906 else:
907 else:
907 remove.append(f)
908 remove.append(f)
908
909
909 updated, added = [], []
910 updated, added = [], []
910 for f in util.sort(changed):
911 for f in util.sort(changed):
911 if f in m1 or f in m2:
912 if f in m1 or f in m2:
912 updated.append(f)
913 updated.append(f)
913 else:
914 else:
914 added.append(f)
915 added.append(f)
915
916
916 # update manifest
917 # update manifest
917 m1.update(new)
918 m1.update(new)
918 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
919 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
919 removed1 = []
920 removed1 = []
920
921
921 for f in removed:
922 for f in removed:
922 if f in m1:
923 if f in m1:
923 del m1[f]
924 del m1[f]
924 removed1.append(f)
925 removed1.append(f)
925 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
926 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
926 (new, removed1))
927 (new, removed1))
927
928
928 # add changeset
929 # add changeset
929 if (not empty_ok and not text) or force_editor:
930 if (not empty_ok and not text) or force_editor:
930 edittext = []
931 edittext = []
931 if text:
932 if text:
932 edittext.append(text)
933 edittext.append(text)
933 edittext.append("")
934 edittext.append("")
934 edittext.append("") # Empty line between message and comments.
935 edittext.append("") # Empty line between message and comments.
935 edittext.append(_("HG: Enter commit message."
936 edittext.append(_("HG: Enter commit message."
936 " Lines beginning with 'HG:' are removed."))
937 " Lines beginning with 'HG:' are removed."))
937 edittext.append("HG: --")
938 edittext.append("HG: --")
938 edittext.append("HG: user: %s" % user)
939 edittext.append("HG: user: %s" % user)
939 if p2 != nullid:
940 if p2 != nullid:
940 edittext.append("HG: branch merge")
941 edittext.append("HG: branch merge")
941 if branchname:
942 if branchname:
942 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
943 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
943 edittext.extend(["HG: added %s" % f for f in added])
944 edittext.extend(["HG: added %s" % f for f in added])
944 edittext.extend(["HG: changed %s" % f for f in updated])
945 edittext.extend(["HG: changed %s" % f for f in updated])
945 edittext.extend(["HG: removed %s" % f for f in removed])
946 edittext.extend(["HG: removed %s" % f for f in removed])
946 if not added and not updated and not removed:
947 if not added and not updated and not removed:
947 edittext.append("HG: no files changed")
948 edittext.append("HG: no files changed")
948 edittext.append("")
949 edittext.append("")
949 # run editor in the repository root
950 # run editor in the repository root
950 olddir = os.getcwd()
951 olddir = os.getcwd()
951 os.chdir(self.root)
952 os.chdir(self.root)
952 text = self.ui.edit("\n".join(edittext), user)
953 text = self.ui.edit("\n".join(edittext), user)
953 os.chdir(olddir)
954 os.chdir(olddir)
954
955
955 lines = [line.rstrip() for line in text.rstrip().splitlines()]
956 lines = [line.rstrip() for line in text.rstrip().splitlines()]
956 while lines and not lines[0]:
957 while lines and not lines[0]:
957 del lines[0]
958 del lines[0]
958 if not lines and use_dirstate:
959 if not lines and use_dirstate:
959 raise util.Abort(_("empty commit message"))
960 raise util.Abort(_("empty commit message"))
960 text = '\n'.join(lines)
961 text = '\n'.join(lines)
961
962
962 self.changelog.delayupdate()
963 self.changelog.delayupdate()
963 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
964 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
964 user, wctx.date(), extra)
965 user, wctx.date(), extra)
965 p = lambda: self.changelog.writepending() and self.root or ""
966 p = lambda: self.changelog.writepending() and self.root or ""
966 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
967 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
967 parent2=xp2, pending=p)
968 parent2=xp2, pending=p)
968 self.changelog.finalize(trp)
969 self.changelog.finalize(trp)
969 tr.close()
970 tr.close()
970
971
971 if self.branchcache:
972 if self.branchcache:
972 self.branchtags()
973 self.branchtags()
973
974
974 if use_dirstate or update_dirstate:
975 if use_dirstate or update_dirstate:
975 self.dirstate.setparents(n)
976 self.dirstate.setparents(n)
976 if use_dirstate:
977 if use_dirstate:
977 for f in removed:
978 for f in removed:
978 self.dirstate.forget(f)
979 self.dirstate.forget(f)
979 valid = 1 # our dirstate updates are complete
980 valid = 1 # our dirstate updates are complete
980
981
981 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
982 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
982 return n
983 return n
983 finally:
984 finally:
984 if not valid: # don't save our updated dirstate
985 if not valid: # don't save our updated dirstate
985 self.dirstate.invalidate()
986 self.dirstate.invalidate()
986 del tr
987 del tr
987
988
988 def walk(self, match, node=None):
989 def walk(self, match, node=None):
989 '''
990 '''
990 walk recursively through the directory tree or a given
991 walk recursively through the directory tree or a given
991 changeset, finding all files matched by the match
992 changeset, finding all files matched by the match
992 function
993 function
993 '''
994 '''
994 return self[node].walk(match)
995 return self[node].walk(match)
995
996
996 def status(self, node1='.', node2=None, match=None,
997 def status(self, node1='.', node2=None, match=None,
997 ignored=False, clean=False, unknown=False):
998 ignored=False, clean=False, unknown=False):
998 """return status of files between two nodes or node and working directory
999 """return status of files between two nodes or node and working directory
999
1000
1000 If node1 is None, use the first dirstate parent instead.
1001 If node1 is None, use the first dirstate parent instead.
1001 If node2 is None, compare node1 with working directory.
1002 If node2 is None, compare node1 with working directory.
1002 """
1003 """
1003
1004
1004 def mfmatches(ctx):
1005 def mfmatches(ctx):
1005 mf = ctx.manifest().copy()
1006 mf = ctx.manifest().copy()
1006 for fn in mf.keys():
1007 for fn in mf.keys():
1007 if not match(fn):
1008 if not match(fn):
1008 del mf[fn]
1009 del mf[fn]
1009 return mf
1010 return mf
1010
1011
1011 if isinstance(node1, context.changectx):
1012 if isinstance(node1, context.changectx):
1012 ctx1 = node1
1013 ctx1 = node1
1013 else:
1014 else:
1014 ctx1 = self[node1]
1015 ctx1 = self[node1]
1015 if isinstance(node2, context.changectx):
1016 if isinstance(node2, context.changectx):
1016 ctx2 = node2
1017 ctx2 = node2
1017 else:
1018 else:
1018 ctx2 = self[node2]
1019 ctx2 = self[node2]
1019
1020
1020 working = ctx2.rev() is None
1021 working = ctx2.rev() is None
1021 parentworking = working and ctx1 == self['.']
1022 parentworking = working and ctx1 == self['.']
1022 match = match or match_.always(self.root, self.getcwd())
1023 match = match or match_.always(self.root, self.getcwd())
1023 listignored, listclean, listunknown = ignored, clean, unknown
1024 listignored, listclean, listunknown = ignored, clean, unknown
1024
1025
1025 # load earliest manifest first for caching reasons
1026 # load earliest manifest first for caching reasons
1026 if not working and ctx2.rev() < ctx1.rev():
1027 if not working and ctx2.rev() < ctx1.rev():
1027 ctx2.manifest()
1028 ctx2.manifest()
1028
1029
1029 if not parentworking:
1030 if not parentworking:
1030 def bad(f, msg):
1031 def bad(f, msg):
1031 if f not in ctx1:
1032 if f not in ctx1:
1032 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1033 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1033 return False
1034 return False
1034 match.bad = bad
1035 match.bad = bad
1035
1036
1036 if working: # we need to scan the working dir
1037 if working: # we need to scan the working dir
1037 s = self.dirstate.status(match, listignored, listclean, listunknown)
1038 s = self.dirstate.status(match, listignored, listclean, listunknown)
1038 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1039 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1039
1040
1040 # check for any possibly clean files
1041 # check for any possibly clean files
1041 if parentworking and cmp:
1042 if parentworking and cmp:
1042 fixup = []
1043 fixup = []
1043 # do a full compare of any files that might have changed
1044 # do a full compare of any files that might have changed
1044 for f in cmp:
1045 for f in cmp:
1045 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1046 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1046 or ctx1[f].cmp(ctx2[f].data())):
1047 or ctx1[f].cmp(ctx2[f].data())):
1047 modified.append(f)
1048 modified.append(f)
1048 else:
1049 else:
1049 fixup.append(f)
1050 fixup.append(f)
1050
1051
1051 if listclean:
1052 if listclean:
1052 clean += fixup
1053 clean += fixup
1053
1054
1054 # update dirstate for files that are actually clean
1055 # update dirstate for files that are actually clean
1055 if fixup:
1056 if fixup:
1056 wlock = None
1057 wlock = None
1057 try:
1058 try:
1058 try:
1059 try:
1059 wlock = self.wlock(False)
1060 wlock = self.wlock(False)
1060 for f in fixup:
1061 for f in fixup:
1061 self.dirstate.normal(f)
1062 self.dirstate.normal(f)
1062 except lock.LockError:
1063 except lock.LockError:
1063 pass
1064 pass
1064 finally:
1065 finally:
1065 del wlock
1066 del wlock
1066
1067
1067 if not parentworking:
1068 if not parentworking:
1068 mf1 = mfmatches(ctx1)
1069 mf1 = mfmatches(ctx1)
1069 if working:
1070 if working:
1070 # we are comparing working dir against non-parent
1071 # we are comparing working dir against non-parent
1071 # generate a pseudo-manifest for the working dir
1072 # generate a pseudo-manifest for the working dir
1072 mf2 = mfmatches(self['.'])
1073 mf2 = mfmatches(self['.'])
1073 for f in cmp + modified + added:
1074 for f in cmp + modified + added:
1074 mf2[f] = None
1075 mf2[f] = None
1075 mf2.set(f, ctx2.flags(f))
1076 mf2.set(f, ctx2.flags(f))
1076 for f in removed:
1077 for f in removed:
1077 if f in mf2:
1078 if f in mf2:
1078 del mf2[f]
1079 del mf2[f]
1079 else:
1080 else:
1080 # we are comparing two revisions
1081 # we are comparing two revisions
1081 deleted, unknown, ignored = [], [], []
1082 deleted, unknown, ignored = [], [], []
1082 mf2 = mfmatches(ctx2)
1083 mf2 = mfmatches(ctx2)
1083
1084
1084 modified, added, clean = [], [], []
1085 modified, added, clean = [], [], []
1085 for fn in mf2:
1086 for fn in mf2:
1086 if fn in mf1:
1087 if fn in mf1:
1087 if (mf1.flags(fn) != mf2.flags(fn) or
1088 if (mf1.flags(fn) != mf2.flags(fn) or
1088 (mf1[fn] != mf2[fn] and
1089 (mf1[fn] != mf2[fn] and
1089 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1090 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1090 modified.append(fn)
1091 modified.append(fn)
1091 elif listclean:
1092 elif listclean:
1092 clean.append(fn)
1093 clean.append(fn)
1093 del mf1[fn]
1094 del mf1[fn]
1094 else:
1095 else:
1095 added.append(fn)
1096 added.append(fn)
1096 removed = mf1.keys()
1097 removed = mf1.keys()
1097
1098
1098 r = modified, added, removed, deleted, unknown, ignored, clean
1099 r = modified, added, removed, deleted, unknown, ignored, clean
1099 [l.sort() for l in r]
1100 [l.sort() for l in r]
1100 return r
1101 return r
1101
1102
1102 def add(self, list):
1103 def add(self, list):
1103 wlock = self.wlock()
1104 wlock = self.wlock()
1104 try:
1105 try:
1105 rejected = []
1106 rejected = []
1106 for f in list:
1107 for f in list:
1107 p = self.wjoin(f)
1108 p = self.wjoin(f)
1108 try:
1109 try:
1109 st = os.lstat(p)
1110 st = os.lstat(p)
1110 except:
1111 except:
1111 self.ui.warn(_("%s does not exist!\n") % f)
1112 self.ui.warn(_("%s does not exist!\n") % f)
1112 rejected.append(f)
1113 rejected.append(f)
1113 continue
1114 continue
1114 if st.st_size > 10000000:
1115 if st.st_size > 10000000:
1115 self.ui.warn(_("%s: files over 10MB may cause memory and"
1116 self.ui.warn(_("%s: files over 10MB may cause memory and"
1116 " performance problems\n"
1117 " performance problems\n"
1117 "(use 'hg revert %s' to unadd the file)\n")
1118 "(use 'hg revert %s' to unadd the file)\n")
1118 % (f, f))
1119 % (f, f))
1119 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1120 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1120 self.ui.warn(_("%s not added: only files and symlinks "
1121 self.ui.warn(_("%s not added: only files and symlinks "
1121 "supported currently\n") % f)
1122 "supported currently\n") % f)
1122 rejected.append(p)
1123 rejected.append(p)
1123 elif self.dirstate[f] in 'amn':
1124 elif self.dirstate[f] in 'amn':
1124 self.ui.warn(_("%s already tracked!\n") % f)
1125 self.ui.warn(_("%s already tracked!\n") % f)
1125 elif self.dirstate[f] == 'r':
1126 elif self.dirstate[f] == 'r':
1126 self.dirstate.normallookup(f)
1127 self.dirstate.normallookup(f)
1127 else:
1128 else:
1128 self.dirstate.add(f)
1129 self.dirstate.add(f)
1129 return rejected
1130 return rejected
1130 finally:
1131 finally:
1131 del wlock
1132 del wlock
1132
1133
1133 def forget(self, list):
1134 def forget(self, list):
1134 wlock = self.wlock()
1135 wlock = self.wlock()
1135 try:
1136 try:
1136 for f in list:
1137 for f in list:
1137 if self.dirstate[f] != 'a':
1138 if self.dirstate[f] != 'a':
1138 self.ui.warn(_("%s not added!\n") % f)
1139 self.ui.warn(_("%s not added!\n") % f)
1139 else:
1140 else:
1140 self.dirstate.forget(f)
1141 self.dirstate.forget(f)
1141 finally:
1142 finally:
1142 del wlock
1143 del wlock
1143
1144
    def remove(self, list, unlink=False):
        """Mark the files in *list* as removed in the dirstate.

        If *unlink* is true, also delete them from the working
        directory first.  Files still present afterwards, or never
        tracked, are warned about and skipped; files in the 'added'
        state are simply forgotten.
        """
        # the wlock is taken only after the unlink pass so we do not
        # hold it during filesystem deletion
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        # an already-missing file is fine; anything
                        # else is a real error
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    # deletion failed or the file reappeared: leave the
                    # dirstate entry alone
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # never committed: just drop the pending add
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock
1166
1167
    def undelete(self, list):
        """Restore files marked as removed ('r') from the parent
        manifests and mark them normal again."""
        wlock = None
        try:
            # manifests of the working directory's parents (a nullid
            # parent has no manifest and is skipped)
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    # prefer the first parent's copy when both have it
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock
1183
1184
1184 def copy(self, source, dest):
1185 def copy(self, source, dest):
1185 wlock = None
1186 wlock = None
1186 try:
1187 try:
1187 p = self.wjoin(dest)
1188 p = self.wjoin(dest)
1188 if not (os.path.exists(p) or os.path.islink(p)):
1189 if not (os.path.exists(p) or os.path.islink(p)):
1189 self.ui.warn(_("%s does not exist!\n") % dest)
1190 self.ui.warn(_("%s does not exist!\n") % dest)
1190 elif not (os.path.isfile(p) or os.path.islink(p)):
1191 elif not (os.path.isfile(p) or os.path.islink(p)):
1191 self.ui.warn(_("copy failed: %s is not a file or a "
1192 self.ui.warn(_("copy failed: %s is not a file or a "
1192 "symbolic link\n") % dest)
1193 "symbolic link\n") % dest)
1193 else:
1194 else:
1194 wlock = self.wlock()
1195 wlock = self.wlock()
1195 if self.dirstate[dest] in '?r':
1196 if self.dirstate[dest] in '?r':
1196 self.dirstate.add(dest)
1197 self.dirstate.add(dest)
1197 self.dirstate.copy(source, dest)
1198 self.dirstate.copy(source, dest)
1198 finally:
1199 finally:
1199 del wlock
1200 del wlock
1200
1201
1201 def heads(self, start=None, closed=True):
1202 def heads(self, start=None, closed=True):
1202 heads = self.changelog.heads(start)
1203 heads = self.changelog.heads(start)
1203 def display(head):
1204 def display(head):
1204 if closed:
1205 if closed:
1205 return True
1206 return True
1206 extras = self.changelog.read(head)[5]
1207 extras = self.changelog.read(head)[5]
1207 return ('close' not in extras)
1208 return ('close' not in extras)
1208 # sort the output in rev descending order
1209 # sort the output in rev descending order
1209 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1210 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1210 return [n for (r, n) in util.sort(heads)]
1211 return [n for (r, n) in util.sort(heads)]
1211
1212
    def branchheads(self, branch=None, start=None, closed=True):
        """Return the heads of *branch* (default: the working
        directory's branch), highest revision first.

        If *start* is given, only heads reachable from it are kept.
        Unless *closed* is true, heads whose changeset extras contain
        'close' are filtered out.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self._branchheads()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
1228
1229
1229 def branches(self, nodes):
1230 def branches(self, nodes):
1230 if not nodes:
1231 if not nodes:
1231 nodes = [self.changelog.tip()]
1232 nodes = [self.changelog.tip()]
1232 b = []
1233 b = []
1233 for n in nodes:
1234 for n in nodes:
1234 t = n
1235 t = n
1235 while 1:
1236 while 1:
1236 p = self.changelog.parents(n)
1237 p = self.changelog.parents(n)
1237 if p[1] != nullid or p[0] == nullid:
1238 if p[1] != nullid or p[0] == nullid:
1238 b.append((t, n, p[0], p[1]))
1239 b.append((t, n, p[0], p[1]))
1239 break
1240 break
1240 n = p[0]
1241 n = p[0]
1241 return b
1242 return b
1242
1243
1243 def between(self, pairs):
1244 def between(self, pairs):
1244 r = []
1245 r = []
1245
1246
1246 for top, bottom in pairs:
1247 for top, bottom in pairs:
1247 n, l, i = top, [], 0
1248 n, l, i = top, [], 0
1248 f = 1
1249 f = 1
1249
1250
1250 while n != bottom and n != nullid:
1251 while n != bottom and n != nullid:
1251 p = self.changelog.parents(n)[0]
1252 p = self.changelog.parents(n)[0]
1252 if i == f:
1253 if i == f:
1253 l.append(n)
1254 l.append(n)
1254 f = f * 2
1255 f = f * 2
1255 n = p
1256 n = p
1256 i += 1
1257 i += 1
1257
1258
1258 r.append(l)
1259 r.append(l)
1259
1260
1260 return r
1261 return r
1261
1262
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # element [1] of the (common, missing roots, heads) tuple
        return self.findcommonincoming(remote, base, heads, force)[1]
1279
1280
    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}   # earliest-unknown nodes, i.e. the missing roots
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # we have no changesets at all locally: everything remote
            # has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            # every remote head is already known locally
            return base.keys(), [], []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue still-unknown parents for the next round
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # batch remote queries ten nodes at a time
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                              short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), fetch.keys(), heads
1421
1422
1422 def findoutgoing(self, remote, base=None, heads=None, force=False):
1423 def findoutgoing(self, remote, base=None, heads=None, force=False):
1423 """Return list of nodes that are roots of subsets not in remote
1424 """Return list of nodes that are roots of subsets not in remote
1424
1425
1425 If base dict is specified, assume that these nodes and their parents
1426 If base dict is specified, assume that these nodes and their parents
1426 exist on the remote side.
1427 exist on the remote side.
1427 If a list of heads is specified, return only nodes which are heads
1428 If a list of heads is specified, return only nodes which are heads
1428 or ancestors of these heads, and return a second element which
1429 or ancestors of these heads, and return a second element which
1429 contains all remote heads which get new children.
1430 contains all remote heads which get new children.
1430 """
1431 """
1431 if base == None:
1432 if base == None:
1432 base = {}
1433 base = {}
1433 self.findincoming(remote, base, heads, force=force)
1434 self.findincoming(remote, base, heads, force=force)
1434
1435
1435 self.ui.debug(_("common changesets up to ")
1436 self.ui.debug(_("common changesets up to ")
1436 + " ".join(map(short, base.keys())) + "\n")
1437 + " ".join(map(short, base.keys())) + "\n")
1437
1438
1438 remain = dict.fromkeys(self.changelog.nodemap)
1439 remain = dict.fromkeys(self.changelog.nodemap)
1439
1440
1440 # prune everything remote has from the tree
1441 # prune everything remote has from the tree
1441 del remain[nullid]
1442 del remain[nullid]
1442 remove = base.keys()
1443 remove = base.keys()
1443 while remove:
1444 while remove:
1444 n = remove.pop(0)
1445 n = remove.pop(0)
1445 if n in remain:
1446 if n in remain:
1446 del remain[n]
1447 del remain[n]
1447 for p in self.changelog.parents(n):
1448 for p in self.changelog.parents(n):
1448 remove.append(p)
1449 remove.append(p)
1449
1450
1450 # find every node whose parents have been pruned
1451 # find every node whose parents have been pruned
1451 subset = []
1452 subset = []
1452 # find every remote head that will get new children
1453 # find every remote head that will get new children
1453 updated_heads = {}
1454 updated_heads = {}
1454 for n in remain:
1455 for n in remain:
1455 p1, p2 = self.changelog.parents(n)
1456 p1, p2 = self.changelog.parents(n)
1456 if p1 not in remain and p2 not in remain:
1457 if p1 not in remain and p2 not in remain:
1457 subset.append(n)
1458 subset.append(n)
1458 if heads:
1459 if heads:
1459 if p1 in heads:
1460 if p1 in heads:
1460 updated_heads[p1] = True
1461 updated_heads[p1] = True
1461 if p2 in heads:
1462 if p2 in heads:
1462 updated_heads[p2] = True
1463 updated_heads[p2] = True
1463
1464
1464 # this is the set of all roots we have to push
1465 # this is the set of all roots we have to push
1465 if heads:
1466 if heads:
1466 return subset, updated_heads.keys()
1467 return subset, updated_heads.keys()
1467 else:
1468 else:
1468 return subset
1469 return subset
1469
1470
    def pull(self, remote, heads=None, force=False):
        """Pull missing changesets from *remote* into this repository.

        Returns the result of addchangegroup, or 0 when nothing was
        fetched.  Raises util.Abort when a partial (heads-limited) pull
        is requested but the remote lacks 'changegroupsubset'.
        """
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                # local repo is empty: everything will be transferred
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock
1494
1495
1495 def push(self, remote, force=False, revs=None):
1496 def push(self, remote, force=False, revs=None):
1496 # there are two ways to push to remote repo:
1497 # there are two ways to push to remote repo:
1497 #
1498 #
1498 # addchangegroup assumes local user can lock remote
1499 # addchangegroup assumes local user can lock remote
1499 # repo (local filesystem, old ssh servers).
1500 # repo (local filesystem, old ssh servers).
1500 #
1501 #
1501 # unbundle assumes local user cannot lock remote repo (new ssh
1502 # unbundle assumes local user cannot lock remote repo (new ssh
1502 # servers, http servers).
1503 # servers, http servers).
1503
1504
1504 if remote.capable('unbundle'):
1505 if remote.capable('unbundle'):
1505 return self.push_unbundle(remote, force, revs)
1506 return self.push_unbundle(remote, force, revs)
1506 return self.push_addchangegroup(remote, force, revs)
1507 return self.push_addchangegroup(remote, force, revs)
1507
1508
    def prepush(self, remote, force, revs):
        """Compute the changegroup to send to *remote*.

        Returns (changegroup, remote_heads) on success, or
        (None, errcode) when there is nothing to push or the push
        would create new remote heads without *force*.
        """
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # empty remote: a push can never add heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # pushing everything and we have more heads than remote
                warn = 1
            else:
                # simulate the post-push remote head set
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # stays a head after the push
                            newheads.append(r)
                    else:
                        # unknown locally: assume it remains a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1564
1565
1565 def push_addchangegroup(self, remote, force, revs):
1566 def push_addchangegroup(self, remote, force, revs):
1566 lock = remote.lock()
1567 lock = remote.lock()
1567 try:
1568 try:
1568 ret = self.prepush(remote, force, revs)
1569 ret = self.prepush(remote, force, revs)
1569 if ret[0] is not None:
1570 if ret[0] is not None:
1570 cg, remote_heads = ret
1571 cg, remote_heads = ret
1571 return remote.addchangegroup(cg, 'push', self.url())
1572 return remote.addchangegroup(cg, 'push', self.url())
1572 return ret[1]
1573 return ret[1]
1573 finally:
1574 finally:
1574 del lock
1575 del lock
1575
1576
    def push_unbundle(self, remote, force, revs):
        """Push via the remote's 'unbundle' protocol command.

        Returns unbundle's result, or prepush's error code when there
        is nothing to send.
        """
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            # 'force' disables the server-side race check entirely
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]
1588
1589
1589 def changegroupinfo(self, nodes, source):
1590 def changegroupinfo(self, nodes, source):
1590 if self.ui.verbose or source == 'bundle':
1591 if self.ui.verbose or source == 'bundle':
1591 self.ui.status(_("%d changesets found\n") % len(nodes))
1592 self.ui.status(_("%d changesets found\n") % len(nodes))
1592 if self.ui.debugflag:
1593 if self.ui.debugflag:
1593 self.ui.debug(_("list of changesets:\n"))
1594 self.ui.debug(_("list of changesets:\n"))
1594 for node in nodes:
1595 for node in nodes:
1595 self.ui.debug("%s\n" % hex(node))
1596 self.ui.debug("%s\n" % hex(node))
1596
1597
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the chunk generator.
        """

        if extranodes is None:
            # can we go through the fast path ?
            # If the requested heads are exactly all of our heads, this is an
            # ordinary "everything missing" pull and we can delegate to the
            # much simpler _changegroup().
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set (dict used as a set;
            # this predates the set builtin being used here).
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            # NOTE: returns a Python 2 cmp-style comparator for list.sort().
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (Python 2 has no 'nonlocal').
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    # Key 1 is the manifest, already handled above.
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1905
1906
1906 def changegroup(self, basenodes, source):
1907 def changegroup(self, basenodes, source):
1907 # to avoid a race we use changegroupsubset() (issue1320)
1908 # to avoid a race we use changegroupsubset() (issue1320)
1908 return self.changegroupsubset(basenodes, self.heads(), source)
1909 return self.changegroupsubset(basenodes, self.heads(), source)
1909
1910
    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self

        Returns a util.chunkbuffer wrapping the chunk generator."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # Everything the recipient is missing, given the common nodes.
        nodes = cl.findmissing(common)
        # dict used as a set of outgoing changelog revision numbers.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # A changeset's linknode is itself, so identity is the lookup.
        def identity(x):
            return x

        # Yield the nodes of 'log' whose linkrev is an outgoing changeset.
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # Closure factory: records every file touched by an outgoing
        # changeset into changedfileset as a side effect of cl.group().
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # Closure factory: map a node of 'revlog' to its owning changelog
        # node via the linkrev.
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        # Emit changelog chunks, then manifest chunks, then one named group
        # of chunks per changed file, then the closing chunk.
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so an empty iterator can be detected before
                # emitting the filename header.
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1978
1979
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # Map an incoming changeset node to the revision number it will get
        # (the next slot in the changelog), logging it as a side effect.
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # weakref proxy so the transaction can be gc'd promptly by the
            # 'del tr' in the finally clause even if a revlog kept a ref.
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1  # last rev before the group is applied
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1  # last rev after the group is applied
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                # An empty chunk terminates the stream of per-file groups.
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # 'pending' callback lets hooks read the not-yet-finalized
                # changelog; it returns the repo root when data is pending.
                p = lambda: self.changelog.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook call per added changeset
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2082
2083
2083
2084
2084 def stream_in(self, remote):
2085 def stream_in(self, remote):
2085 fp = remote.stream_out()
2086 fp = remote.stream_out()
2086 l = fp.readline()
2087 l = fp.readline()
2087 try:
2088 try:
2088 resp = int(l)
2089 resp = int(l)
2089 except ValueError:
2090 except ValueError:
2090 raise error.ResponseError(
2091 raise error.ResponseError(
2091 _('Unexpected response from remote server:'), l)
2092 _('Unexpected response from remote server:'), l)
2092 if resp == 1:
2093 if resp == 1:
2093 raise util.Abort(_('operation forbidden by server'))
2094 raise util.Abort(_('operation forbidden by server'))
2094 elif resp == 2:
2095 elif resp == 2:
2095 raise util.Abort(_('locking the remote repository failed'))
2096 raise util.Abort(_('locking the remote repository failed'))
2096 elif resp != 0:
2097 elif resp != 0:
2097 raise util.Abort(_('the server sent an unknown error code'))
2098 raise util.Abort(_('the server sent an unknown error code'))
2098 self.ui.status(_('streaming all changes\n'))
2099 self.ui.status(_('streaming all changes\n'))
2099 l = fp.readline()
2100 l = fp.readline()
2100 try:
2101 try:
2101 total_files, total_bytes = map(int, l.split(' ', 1))
2102 total_files, total_bytes = map(int, l.split(' ', 1))
2102 except (ValueError, TypeError):
2103 except (ValueError, TypeError):
2103 raise error.ResponseError(
2104 raise error.ResponseError(
2104 _('Unexpected response from remote server:'), l)
2105 _('Unexpected response from remote server:'), l)
2105 self.ui.status(_('%d files to transfer, %s of data\n') %
2106 self.ui.status(_('%d files to transfer, %s of data\n') %
2106 (total_files, util.bytecount(total_bytes)))
2107 (total_files, util.bytecount(total_bytes)))
2107 start = time.time()
2108 start = time.time()
2108 for i in xrange(total_files):
2109 for i in xrange(total_files):
2109 # XXX doesn't support '\n' or '\r' in filenames
2110 # XXX doesn't support '\n' or '\r' in filenames
2110 l = fp.readline()
2111 l = fp.readline()
2111 try:
2112 try:
2112 name, size = l.split('\0', 1)
2113 name, size = l.split('\0', 1)
2113 size = int(size)
2114 size = int(size)
2114 except (ValueError, TypeError):
2115 except (ValueError, TypeError):
2115 raise error.ResponseError(
2116 raise error.ResponseError(
2116 _('Unexpected response from remote server:'), l)
2117 _('Unexpected response from remote server:'), l)
2117 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2118 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2118 ofp = self.sopener(name, 'w')
2119 ofp = self.sopener(name, 'w')
2119 for chunk in util.filechunkiter(fp, limit=size):
2120 for chunk in util.filechunkiter(fp, limit=size):
2120 ofp.write(chunk)
2121 ofp.write(chunk)
2121 ofp.close()
2122 ofp.close()
2122 elapsed = time.time() - start
2123 elapsed = time.time() - start
2123 if elapsed <= 0:
2124 if elapsed <= 0:
2124 elapsed = 0.001
2125 elapsed = 0.001
2125 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2126 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2126 (util.bytecount(total_bytes), elapsed,
2127 (util.bytecount(total_bytes), elapsed,
2127 util.bytecount(total_bytes / elapsed)))
2128 util.bytecount(total_bytes / elapsed)))
2128 self.invalidate()
2129 self.invalidate()
2129 return len(self.heads()) + 1
2130 return len(self.heads()) + 1
2130
2131
2131 def clone(self, remote, heads=[], stream=False):
2132 def clone(self, remote, heads=[], stream=False):
2132 '''clone remote repository.
2133 '''clone remote repository.
2133
2134
2134 keyword arguments:
2135 keyword arguments:
2135 heads: list of revs to clone (forces use of pull)
2136 heads: list of revs to clone (forces use of pull)
2136 stream: use streaming clone if possible'''
2137 stream: use streaming clone if possible'''
2137
2138
2138 # now, all clients that can request uncompressed clones can
2139 # now, all clients that can request uncompressed clones can
2139 # read repo formats supported by all servers that can serve
2140 # read repo formats supported by all servers that can serve
2140 # them.
2141 # them.
2141
2142
2142 # if revlog format changes, client will have to check version
2143 # if revlog format changes, client will have to check version
2143 # and format flags on "stream" capability, and use
2144 # and format flags on "stream" capability, and use
2144 # uncompressed only if compatible.
2145 # uncompressed only if compatible.
2145
2146
2146 if stream and not heads and remote.capable('stream'):
2147 if stream and not heads and remote.capable('stream'):
2147 return self.stream_in(remote)
2148 return self.stream_in(remote)
2148 return self.pull(remote, heads)
2149 return self.pull(remote, heads)
2149
2150
2150 # used to avoid circular references so destructors work
2151 # used to avoid circular references so destructors work
2151 def aftertrans(files):
2152 def aftertrans(files):
2152 renamefiles = [tuple(t) for t in files]
2153 renamefiles = [tuple(t) for t in files]
2153 def a():
2154 def a():
2154 for src, dest in renamefiles:
2155 for src, dest in renamefiles:
2155 util.rename(src, dest)
2156 util.rename(src, dest)
2156 return a
2157 return a
2157
2158
2158 def instance(ui, path, create):
2159 def instance(ui, path, create):
2159 return localrepository(ui, util.drop_scheme('file', path), create)
2160 return localrepository(ui, util.drop_scheme('file', path), create)
2160
2161
2161 def islocal(path):
2162 def islocal(path):
2162 return True
2163 return True
@@ -1,53 +1,54 b''
1 changeset: 0:0acdaf898367
1 changeset: 0:0acdaf898367
2 tag: tip
2 tag: tip
3 user: test
3 user: test
4 date: Mon Jan 12 13:46:40 1970 +0000
4 date: Mon Jan 12 13:46:40 1970 +0000
5 summary: test
5 summary: test
6
6
7 changeset: 1:3ecf002a1c57
7 changeset: 1:3ecf002a1c57
8 tag: tip
8 tag: tip
9 user: test
9 user: test
10 date: Mon Jan 12 13:46:40 1970 +0000
10 date: Mon Jan 12 13:46:40 1970 +0000
11 summary: Added tag bleah for changeset 0acdaf898367
11 summary: Added tag bleah for changeset 0acdaf898367
12
12
13 changeset: 0:0acdaf898367
13 changeset: 0:0acdaf898367
14 tag: bleah
14 tag: bleah
15 user: test
15 user: test
16 date: Mon Jan 12 13:46:40 1970 +0000
16 date: Mon Jan 12 13:46:40 1970 +0000
17 summary: test
17 summary: test
18
18
19 abort: working copy of .hgtags is changed (please commit .hgtags manually)
19 abort: working copy of .hgtags is changed (please commit .hgtags manually)
20 failed
20 failed
21 abort: tag names must be unique
21 abort: tag names must be unique
22 failed
22 failed
23 abort: the name 'tip' is reserved
23 abort: the name 'tip' is reserved
24 failed
24 failed
25 abort: tag 'bleah' already exists (use -f to force)
25 abort: tag 'bleah' already exists (use -f to force)
26 failed
26 failed
27 abort: tag 'bleah' already exists (use -f to force)
27 abort: tag 'bleah' already exists (use -f to force)
28 failed
28 failed
29 abort: tag 'blecch' does not exist
29 abort: tag 'blecch' does not exist
30 failed
30 failed
31 abort: tag 'blecch' does not exist
31 abort: tag 'blecch' does not exist
32 failed
32 failed
33 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 bleah
33 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 bleah
34 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 bleah0
34 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 bleah0
35 868cc8fbb43b754ad09fa109885d243fc49adae7 gack
35 868cc8fbb43b754ad09fa109885d243fc49adae7 gack
36 868cc8fbb43b754ad09fa109885d243fc49adae7 gawk
36 868cc8fbb43b754ad09fa109885d243fc49adae7 gawk
37 868cc8fbb43b754ad09fa109885d243fc49adae7 gorp
37 868cc8fbb43b754ad09fa109885d243fc49adae7 gorp
38 868cc8fbb43b754ad09fa109885d243fc49adae7 gack
38 3807bcf62c5614cb6c16436b514d7764ca5f1631 gack
39 3807bcf62c5614cb6c16436b514d7764ca5f1631 gack
39 3807bcf62c5614cb6c16436b514d7764ca5f1631 gack
40 3807bcf62c5614cb6c16436b514d7764ca5f1631 gack
40 0000000000000000000000000000000000000000 gack
41 0000000000000000000000000000000000000000 gack
41 868cc8fbb43b754ad09fa109885d243fc49adae7 gorp
42 868cc8fbb43b754ad09fa109885d243fc49adae7 gorp
42 0000000000000000000000000000000000000000 gorp
43 0000000000000000000000000000000000000000 gorp
43 3ecf002a1c572a2f3bb4e665417e60fca65bbd42 bleah1
44 3ecf002a1c572a2f3bb4e665417e60fca65bbd42 bleah1
44 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
45 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
45 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 foobar
46 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 foobar
46 3ecf002a1c572a2f3bb4e665417e60fca65bbd42 bleah1
47 3ecf002a1c572a2f3bb4e665417e60fca65bbd42 bleah1
47 abort: '\n' cannot be used in a tag name
48 abort: '\n' cannot be used in a tag name
48 abort: ':' cannot be used in a tag name
49 abort: ':' cannot be used in a tag name
49 % issue 601
50 % issue 601
50 3ecf002a1c572a2f3bb4e665417e60fca65bbd42 bleah13ecf002a1c572a2f3bb4e665417e60fca65bbd42 bleah1
51 3ecf002a1c572a2f3bb4e665417e60fca65bbd42 bleah13ecf002a1c572a2f3bb4e665417e60fca65bbd42 bleah1
51 f68b039e72eacbb2e68b0543e1f6e50990aa2bb5 localnewline
52 f68b039e72eacbb2e68b0543e1f6e50990aa2bb5 localnewline
52 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 foobar0acdaf8983679e0aac16e811534eb49d7ee1f2b4 foobar
53 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 foobar0acdaf8983679e0aac16e811534eb49d7ee1f2b4 foobar
53 6ae703d793c8b1f097116869275ecd97b2977a2b newline
54 6ae703d793c8b1f097116869275ecd97b2977a2b newline
@@ -1,83 +1,83 b''
1 000000000000 tip
1 000000000000 tip
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 0acdaf898367 tip
3 0acdaf898367 tip
4 tip 0:0acdaf898367
4 tip 0:0acdaf898367
5 This is a local tag with a really long name! 0:0acdaf898367
5 This is a local tag with a really long name! 0:0acdaf898367
6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
7 tip 1:8a3ca90d111d
7 tip 1:8a3ca90d111d
8 first 0:0acdaf898367
8 first 0:0acdaf898367
9 8a3ca90d111d tip
9 8a3ca90d111d tip
10 M a
10 M a
11 8a3ca90d111d+ tip
11 8a3ca90d111d+ tip
12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
13 0acdaf898367+ first
13 0acdaf898367+ first
14 0acdaf898367+ first
14 0acdaf898367+ first
15 M a
15 M a
16 created new head
16 created new head
17 8216907a933d tip
17 8216907a933d tip
18 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
19 (branch merge, don't forget to commit)
19 (branch merge, don't forget to commit)
20 8216907a933d+8a3ca90d111d+ tip
20 8216907a933d+8a3ca90d111d+ tip
21 M .hgtags
21 M .hgtags
22 tip 6:e2174d339386
22 tip 6:e2174d339386
23 first 0:0acdaf898367
23 first 0:0acdaf898367
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
25 created new head
25 created new head
26 .hgtags@c071f74ab5eb, line 2: cannot parse entry
26 .hgtags@c071f74ab5eb, line 2: cannot parse entry
27 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
27 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
28 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
28 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
29 localtags, line 1: tag 'invalid' refers to unknown node
29 localtags, line 1: tag 'invalid' refers to unknown node
30 tip 8:4ca6f1b1a68c
30 tip 8:4ca6f1b1a68c
31 first 0:0acdaf898367
31 first 0:0acdaf898367
32 changeset: 8:4ca6f1b1a68c
32 changeset: 8:4ca6f1b1a68c
33 .hgtags@c071f74ab5eb, line 2: cannot parse entry
33 .hgtags@c071f74ab5eb, line 2: cannot parse entry
34 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
34 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
35 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
35 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
36 localtags, line 1: tag 'invalid' refers to unknown node
36 localtags, line 1: tag 'invalid' refers to unknown node
37 tag: tip
37 tag: tip
38 parent: 3:b2ef3841386b
38 parent: 3:b2ef3841386b
39 user: test
39 user: test
40 date: Mon Jan 12 13:46:40 1970 +0000
40 date: Mon Jan 12 13:46:40 1970 +0000
41 summary: head
41 summary: head
42
42
43 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
44 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
44 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
45 created new head
45 created new head
46 tip 4:36195b728445
46 tip 4:36195b728445
47 bar 1:b204a97e6e8d
47 bar 1:b204a97e6e8d
48 changeset: 5:1f98c77278de
48 changeset: 5:1f98c77278de
49 tag: tip
49 tag: tip
50 user: test
50 user: test
51 date: Mon Jan 12 13:46:40 1970 +0000
51 date: Mon Jan 12 13:46:40 1970 +0000
52 summary: Removed tag bar
52 summary: Removed tag bar
53
53
54 tip 5:1f98c77278de
54 tip 5:1f98c77278de
55 % remove nonexistent tag
55 % remove nonexistent tag
56 abort: tag 'foobar' does not exist
56 abort: tag 'foobar' does not exist
57 changeset: 5:1f98c77278de
57 changeset: 5:1f98c77278de
58 tag: tip
58 tag: tip
59 user: test
59 user: test
60 date: Mon Jan 12 13:46:40 1970 +0000
60 date: Mon Jan 12 13:46:40 1970 +0000
61 summary: Removed tag bar
61 summary: Removed tag bar
62
62
63 tip 5:d8bb4d1eff25
63 tip 5:e86d7ed95fd3
64 bar 0:b409d9da318e
64 bar 0:b409d9da318e
65 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
65 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
66 created new head
66 created new head
67 tip 6:b5ff9d142648
67 tip 6:b744fbe1f6dd
68 bar 0:b409d9da318e
68 bar 0:b409d9da318e
69 abort: tag 'bar' already exists (use -f to force)
69 abort: tag 'bar' already exists (use -f to force)
70 tip 6:b5ff9d142648
70 tip 6:b744fbe1f6dd
71 bar 0:b409d9da318e
71 bar 0:b409d9da318e
72 adding foo
72 adding foo
73 tip 3:ca8479b4351c
73 tip 3:197c21bbbf2c
74 bar 2:72b852876a42
74 bar 2:6fa450212aeb
75 % bar should still point to rev 2
75 % bar should still point to rev 2
76 tip 4:40af5d225513
76 tip 4:3b4b14ed0202
77 bar 2:72b852876a42
77 bar 2:6fa450212aeb
78 adding foo
78 adding foo
79 abort: tag 'localtag' is not a global tag
79 abort: tag 'localtag' is not a global tag
80 abort: tag 'globaltag' is not a local tag
80 abort: tag 'globaltag' is not a local tag
81 tip 1:a0b6fe111088
81 tip 1:a0b6fe111088
82 localtag 0:bbd179dfa0a7 local
82 localtag 0:bbd179dfa0a7 local
83 globaltag 0:bbd179dfa0a7
83 globaltag 0:bbd179dfa0a7
General Comments 0
You need to be logged in to leave comments. Login now