# Provenance: web repository-browser capture of mercurial/localrepo.py
# at changeset r7728:b7ac53f7 (branch: default), author John Mulligan.
# Commit message: "branch closing: permit closing the default branch..."
# (Original page chrome and the diff hunk header "@@ -1,2156 +1,2154" removed.)
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store
12 import lock, transaction, stat, errno, ui, store
13 import os, time, util, extensions, hook, inspect, error
13 import os, time, util, extensions, hook, inspect, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16
16
class localrepository(repo.repository):
    # wire-protocol capabilities advertised to peers
    capabilities = util.set(('lookup', 'changegroupsubset'))
    # on-disk format requirements this implementation understands
    supported = ('revlogv1', 'store', 'fncache')
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.root = os.path.realpath(path)
23 self.root = os.path.realpath(path)
24 self.path = os.path.join(self.root, ".hg")
24 self.path = os.path.join(self.root, ".hg")
25 self.origroot = path
25 self.origroot = path
26 self.opener = util.opener(self.path)
26 self.opener = util.opener(self.path)
27 self.wopener = util.opener(self.root)
27 self.wopener = util.opener(self.root)
28
28
29 if not os.path.isdir(self.path):
29 if not os.path.isdir(self.path):
30 if create:
30 if create:
31 if not os.path.exists(path):
31 if not os.path.exists(path):
32 os.mkdir(path)
32 os.mkdir(path)
33 os.mkdir(self.path)
33 os.mkdir(self.path)
34 requirements = ["revlogv1"]
34 requirements = ["revlogv1"]
35 if parentui.configbool('format', 'usestore', True):
35 if parentui.configbool('format', 'usestore', True):
36 os.mkdir(os.path.join(self.path, "store"))
36 os.mkdir(os.path.join(self.path, "store"))
37 requirements.append("store")
37 requirements.append("store")
38 if parentui.configbool('format', 'usefncache', True):
38 if parentui.configbool('format', 'usefncache', True):
39 requirements.append("fncache")
39 requirements.append("fncache")
40 # create an invalid changelog
40 # create an invalid changelog
41 self.opener("00changelog.i", "a").write(
41 self.opener("00changelog.i", "a").write(
42 '\0\0\0\2' # represents revlogv2
42 '\0\0\0\2' # represents revlogv2
43 ' dummy changelog to prevent using the old repo layout'
43 ' dummy changelog to prevent using the old repo layout'
44 )
44 )
45 reqfile = self.opener("requires", "w")
45 reqfile = self.opener("requires", "w")
46 for r in requirements:
46 for r in requirements:
47 reqfile.write("%s\n" % r)
47 reqfile.write("%s\n" % r)
48 reqfile.close()
48 reqfile.close()
49 else:
49 else:
50 raise error.RepoError(_("repository %s not found") % path)
50 raise error.RepoError(_("repository %s not found") % path)
51 elif create:
51 elif create:
52 raise error.RepoError(_("repository %s already exists") % path)
52 raise error.RepoError(_("repository %s already exists") % path)
53 else:
53 else:
54 # find requirements
54 # find requirements
55 requirements = []
55 requirements = []
56 try:
56 try:
57 requirements = self.opener("requires").read().splitlines()
57 requirements = self.opener("requires").read().splitlines()
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise error.RepoError(_("requirement '%s' not supported") % r)
60 raise error.RepoError(_("requirement '%s' not supported") % r)
61 except IOError, inst:
61 except IOError, inst:
62 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
63 raise
63 raise
64
64
65 self.store = store.store(requirements, self.path, util.opener)
65 self.store = store.store(requirements, self.path, util.opener)
66 self.spath = self.store.path
66 self.spath = self.store.path
67 self.sopener = self.store.opener
67 self.sopener = self.store.opener
68 self.sjoin = self.store.join
68 self.sjoin = self.store.join
69 self.opener.createmode = self.store.createmode
69 self.opener.createmode = self.store.createmode
70
70
71 self.ui = ui.ui(parentui=parentui)
71 self.ui = ui.ui(parentui=parentui)
72 try:
72 try:
73 self.ui.readconfig(self.join("hgrc"), self.root)
73 self.ui.readconfig(self.join("hgrc"), self.root)
74 extensions.loadall(self.ui)
74 extensions.loadall(self.ui)
75 except IOError:
75 except IOError:
76 pass
76 pass
77
77
78 self.tagscache = None
78 self.tagscache = None
79 self._tagstypecache = None
79 self._tagstypecache = None
80 self.branchcache = None
80 self.branchcache = None
81 self._ubranchcache = None # UTF-8 version of branchcache
81 self._ubranchcache = None # UTF-8 version of branchcache
82 self._branchcachetip = None
82 self._branchcachetip = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.filterpats = {}
84 self.filterpats = {}
85 self._datafilters = {}
85 self._datafilters = {}
86 self._transref = self._lockref = self._wlockref = None
86 self._transref = self._lockref = self._wlockref = None
87
87
88 def __getattr__(self, name):
88 def __getattr__(self, name):
89 if name == 'changelog':
89 if name == 'changelog':
90 self.changelog = changelog.changelog(self.sopener)
90 self.changelog = changelog.changelog(self.sopener)
91 self.sopener.defversion = self.changelog.version
91 self.sopener.defversion = self.changelog.version
92 return self.changelog
92 return self.changelog
93 if name == 'manifest':
93 if name == 'manifest':
94 self.changelog
94 self.changelog
95 self.manifest = manifest.manifest(self.sopener)
95 self.manifest = manifest.manifest(self.sopener)
96 return self.manifest
96 return self.manifest
97 if name == 'dirstate':
97 if name == 'dirstate':
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 return self.dirstate
99 return self.dirstate
100 else:
100 else:
101 raise AttributeError(name)
101 raise AttributeError(name)
102
102
103 def __getitem__(self, changeid):
103 def __getitem__(self, changeid):
104 if changeid == None:
104 if changeid == None:
105 return context.workingctx(self)
105 return context.workingctx(self)
106 return context.changectx(self, changeid)
106 return context.changectx(self, changeid)
107
107
108 def __nonzero__(self):
108 def __nonzero__(self):
109 return True
109 return True
110
110
111 def __len__(self):
111 def __len__(self):
112 return len(self.changelog)
112 return len(self.changelog)
113
113
114 def __iter__(self):
114 def __iter__(self):
115 for i in xrange(len(self)):
115 for i in xrange(len(self)):
116 yield i
116 yield i
117
117
118 def url(self):
118 def url(self):
119 return 'file:' + self.root
119 return 'file:' + self.root
120
120
121 def hook(self, name, throw=False, **args):
121 def hook(self, name, throw=False, **args):
122 return hook.hook(self.ui, self, name, throw, **args)
122 return hook.hook(self.ui, self, name, throw, **args)
123
123
124 tag_disallowed = ':\r\n'
124 tag_disallowed = ':\r\n'
125
125
126 def _tag(self, names, node, message, local, user, date, parent=None,
126 def _tag(self, names, node, message, local, user, date, parent=None,
127 extra={}):
127 extra={}):
128 use_dirstate = parent is None
128 use_dirstate = parent is None
129
129
130 if isinstance(names, str):
130 if isinstance(names, str):
131 allchars = names
131 allchars = names
132 names = (names,)
132 names = (names,)
133 else:
133 else:
134 allchars = ''.join(names)
134 allchars = ''.join(names)
135 for c in self.tag_disallowed:
135 for c in self.tag_disallowed:
136 if c in allchars:
136 if c in allchars:
137 raise util.Abort(_('%r cannot be used in a tag name') % c)
137 raise util.Abort(_('%r cannot be used in a tag name') % c)
138
138
139 for name in names:
139 for name in names:
140 self.hook('pretag', throw=True, node=hex(node), tag=name,
140 self.hook('pretag', throw=True, node=hex(node), tag=name,
141 local=local)
141 local=local)
142
142
143 def writetags(fp, names, munge, prevtags):
143 def writetags(fp, names, munge, prevtags):
144 fp.seek(0, 2)
144 fp.seek(0, 2)
145 if prevtags and prevtags[-1] != '\n':
145 if prevtags and prevtags[-1] != '\n':
146 fp.write('\n')
146 fp.write('\n')
147 for name in names:
147 for name in names:
148 m = munge and munge(name) or name
148 m = munge and munge(name) or name
149 if self._tagstypecache and name in self._tagstypecache:
149 if self._tagstypecache and name in self._tagstypecache:
150 old = self.tagscache.get(name, nullid)
150 old = self.tagscache.get(name, nullid)
151 fp.write('%s %s\n' % (hex(old), m))
151 fp.write('%s %s\n' % (hex(old), m))
152 fp.write('%s %s\n' % (hex(node), m))
152 fp.write('%s %s\n' % (hex(node), m))
153 fp.close()
153 fp.close()
154
154
155 prevtags = ''
155 prevtags = ''
156 if local:
156 if local:
157 try:
157 try:
158 fp = self.opener('localtags', 'r+')
158 fp = self.opener('localtags', 'r+')
159 except IOError, err:
159 except IOError, err:
160 fp = self.opener('localtags', 'a')
160 fp = self.opener('localtags', 'a')
161 else:
161 else:
162 prevtags = fp.read()
162 prevtags = fp.read()
163
163
164 # local tags are stored in the current charset
164 # local tags are stored in the current charset
165 writetags(fp, names, None, prevtags)
165 writetags(fp, names, None, prevtags)
166 for name in names:
166 for name in names:
167 self.hook('tag', node=hex(node), tag=name, local=local)
167 self.hook('tag', node=hex(node), tag=name, local=local)
168 return
168 return
169
169
170 if use_dirstate:
170 if use_dirstate:
171 try:
171 try:
172 fp = self.wfile('.hgtags', 'rb+')
172 fp = self.wfile('.hgtags', 'rb+')
173 except IOError, err:
173 except IOError, err:
174 fp = self.wfile('.hgtags', 'ab')
174 fp = self.wfile('.hgtags', 'ab')
175 else:
175 else:
176 prevtags = fp.read()
176 prevtags = fp.read()
177 else:
177 else:
178 try:
178 try:
179 prevtags = self.filectx('.hgtags', parent).data()
179 prevtags = self.filectx('.hgtags', parent).data()
180 except error.LookupError:
180 except error.LookupError:
181 pass
181 pass
182 fp = self.wfile('.hgtags', 'wb')
182 fp = self.wfile('.hgtags', 'wb')
183 if prevtags:
183 if prevtags:
184 fp.write(prevtags)
184 fp.write(prevtags)
185
185
186 # committed tags are stored in UTF-8
186 # committed tags are stored in UTF-8
187 writetags(fp, names, util.fromlocal, prevtags)
187 writetags(fp, names, util.fromlocal, prevtags)
188
188
189 if use_dirstate and '.hgtags' not in self.dirstate:
189 if use_dirstate and '.hgtags' not in self.dirstate:
190 self.add(['.hgtags'])
190 self.add(['.hgtags'])
191
191
192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
193 extra=extra)
193 extra=extra)
194
194
195 for name in names:
195 for name in names:
196 self.hook('tag', node=hex(node), tag=name, local=local)
196 self.hook('tag', node=hex(node), tag=name, local=local)
197
197
198 return tagnode
198 return tagnode
199
199
200 def tag(self, names, node, message, local, user, date):
200 def tag(self, names, node, message, local, user, date):
201 '''tag a revision with one or more symbolic names.
201 '''tag a revision with one or more symbolic names.
202
202
203 names is a list of strings or, when adding a single tag, names may be a
203 names is a list of strings or, when adding a single tag, names may be a
204 string.
204 string.
205
205
206 if local is True, the tags are stored in a per-repository file.
206 if local is True, the tags are stored in a per-repository file.
207 otherwise, they are stored in the .hgtags file, and a new
207 otherwise, they are stored in the .hgtags file, and a new
208 changeset is committed with the change.
208 changeset is committed with the change.
209
209
210 keyword arguments:
210 keyword arguments:
211
211
212 local: whether to store tags in non-version-controlled file
212 local: whether to store tags in non-version-controlled file
213 (default False)
213 (default False)
214
214
215 message: commit message to use if committing
215 message: commit message to use if committing
216
216
217 user: name of user to use if committing
217 user: name of user to use if committing
218
218
219 date: date tuple to use if committing'''
219 date: date tuple to use if committing'''
220
220
221 for x in self.status()[:5]:
221 for x in self.status()[:5]:
222 if '.hgtags' in x:
222 if '.hgtags' in x:
223 raise util.Abort(_('working copy of .hgtags is changed '
223 raise util.Abort(_('working copy of .hgtags is changed '
224 '(please commit .hgtags manually)'))
224 '(please commit .hgtags manually)'))
225
225
226 self._tag(names, node, message, local, user, date)
226 self._tag(names, node, message, local, user, date)
227
227
228 def tags(self):
228 def tags(self):
229 '''return a mapping of tag to node'''
229 '''return a mapping of tag to node'''
230 if self.tagscache:
230 if self.tagscache:
231 return self.tagscache
231 return self.tagscache
232
232
233 globaltags = {}
233 globaltags = {}
234 tagtypes = {}
234 tagtypes = {}
235
235
236 def readtags(lines, fn, tagtype):
236 def readtags(lines, fn, tagtype):
237 filetags = {}
237 filetags = {}
238 count = 0
238 count = 0
239
239
240 def warn(msg):
240 def warn(msg):
241 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
241 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
242
242
243 for l in lines:
243 for l in lines:
244 count += 1
244 count += 1
245 if not l:
245 if not l:
246 continue
246 continue
247 s = l.split(" ", 1)
247 s = l.split(" ", 1)
248 if len(s) != 2:
248 if len(s) != 2:
249 warn(_("cannot parse entry"))
249 warn(_("cannot parse entry"))
250 continue
250 continue
251 node, key = s
251 node, key = s
252 key = util.tolocal(key.strip()) # stored in UTF-8
252 key = util.tolocal(key.strip()) # stored in UTF-8
253 try:
253 try:
254 bin_n = bin(node)
254 bin_n = bin(node)
255 except TypeError:
255 except TypeError:
256 warn(_("node '%s' is not well formed") % node)
256 warn(_("node '%s' is not well formed") % node)
257 continue
257 continue
258 if bin_n not in self.changelog.nodemap:
258 if bin_n not in self.changelog.nodemap:
259 warn(_("tag '%s' refers to unknown node") % key)
259 warn(_("tag '%s' refers to unknown node") % key)
260 continue
260 continue
261
261
262 h = []
262 h = []
263 if key in filetags:
263 if key in filetags:
264 n, h = filetags[key]
264 n, h = filetags[key]
265 h.append(n)
265 h.append(n)
266 filetags[key] = (bin_n, h)
266 filetags[key] = (bin_n, h)
267
267
268 for k, nh in filetags.iteritems():
268 for k, nh in filetags.iteritems():
269 if k not in globaltags:
269 if k not in globaltags:
270 globaltags[k] = nh
270 globaltags[k] = nh
271 tagtypes[k] = tagtype
271 tagtypes[k] = tagtype
272 continue
272 continue
273
273
274 # we prefer the global tag if:
274 # we prefer the global tag if:
275 # it supercedes us OR
275 # it supercedes us OR
276 # mutual supercedes and it has a higher rank
276 # mutual supercedes and it has a higher rank
277 # otherwise we win because we're tip-most
277 # otherwise we win because we're tip-most
278 an, ah = nh
278 an, ah = nh
279 bn, bh = globaltags[k]
279 bn, bh = globaltags[k]
280 if (bn != an and an in bh and
280 if (bn != an and an in bh and
281 (bn not in ah or len(bh) > len(ah))):
281 (bn not in ah or len(bh) > len(ah))):
282 an = bn
282 an = bn
283 ah.extend([n for n in bh if n not in ah])
283 ah.extend([n for n in bh if n not in ah])
284 globaltags[k] = an, ah
284 globaltags[k] = an, ah
285 tagtypes[k] = tagtype
285 tagtypes[k] = tagtype
286
286
287 # read the tags file from each head, ending with the tip
287 # read the tags file from each head, ending with the tip
288 f = None
288 f = None
289 for rev, node, fnode in self._hgtagsnodes():
289 for rev, node, fnode in self._hgtagsnodes():
290 f = (f and f.filectx(fnode) or
290 f = (f and f.filectx(fnode) or
291 self.filectx('.hgtags', fileid=fnode))
291 self.filectx('.hgtags', fileid=fnode))
292 readtags(f.data().splitlines(), f, "global")
292 readtags(f.data().splitlines(), f, "global")
293
293
294 try:
294 try:
295 data = util.fromlocal(self.opener("localtags").read())
295 data = util.fromlocal(self.opener("localtags").read())
296 # localtags are stored in the local character set
296 # localtags are stored in the local character set
297 # while the internal tag table is stored in UTF-8
297 # while the internal tag table is stored in UTF-8
298 readtags(data.splitlines(), "localtags", "local")
298 readtags(data.splitlines(), "localtags", "local")
299 except IOError:
299 except IOError:
300 pass
300 pass
301
301
302 self.tagscache = {}
302 self.tagscache = {}
303 self._tagstypecache = {}
303 self._tagstypecache = {}
304 for k, nh in globaltags.iteritems():
304 for k, nh in globaltags.iteritems():
305 n = nh[0]
305 n = nh[0]
306 if n != nullid:
306 if n != nullid:
307 self.tagscache[k] = n
307 self.tagscache[k] = n
308 self._tagstypecache[k] = tagtypes[k]
308 self._tagstypecache[k] = tagtypes[k]
309 self.tagscache['tip'] = self.changelog.tip()
309 self.tagscache['tip'] = self.changelog.tip()
310 return self.tagscache
310 return self.tagscache
311
311
312 def tagtype(self, tagname):
312 def tagtype(self, tagname):
313 '''
313 '''
314 return the type of the given tag. result can be:
314 return the type of the given tag. result can be:
315
315
316 'local' : a local tag
316 'local' : a local tag
317 'global' : a global tag
317 'global' : a global tag
318 None : tag does not exist
318 None : tag does not exist
319 '''
319 '''
320
320
321 self.tags()
321 self.tags()
322
322
323 return self._tagstypecache.get(tagname)
323 return self._tagstypecache.get(tagname)
324
324
325 def _hgtagsnodes(self):
325 def _hgtagsnodes(self):
326 heads = self.heads()
326 heads = self.heads()
327 heads.reverse()
327 heads.reverse()
328 last = {}
328 last = {}
329 ret = []
329 ret = []
330 for node in heads:
330 for node in heads:
331 c = self[node]
331 c = self[node]
332 rev = c.rev()
332 rev = c.rev()
333 try:
333 try:
334 fnode = c.filenode('.hgtags')
334 fnode = c.filenode('.hgtags')
335 except error.LookupError:
335 except error.LookupError:
336 continue
336 continue
337 ret.append((rev, node, fnode))
337 ret.append((rev, node, fnode))
338 if fnode in last:
338 if fnode in last:
339 ret[last[fnode]] = None
339 ret[last[fnode]] = None
340 last[fnode] = len(ret) - 1
340 last[fnode] = len(ret) - 1
341 return [item for item in ret if item]
341 return [item for item in ret if item]
342
342
343 def tagslist(self):
343 def tagslist(self):
344 '''return a list of tags ordered by revision'''
344 '''return a list of tags ordered by revision'''
345 l = []
345 l = []
346 for t, n in self.tags().iteritems():
346 for t, n in self.tags().iteritems():
347 try:
347 try:
348 r = self.changelog.rev(n)
348 r = self.changelog.rev(n)
349 except:
349 except:
350 r = -2 # sort to the beginning of the list if unknown
350 r = -2 # sort to the beginning of the list if unknown
351 l.append((r, t, n))
351 l.append((r, t, n))
352 return [(t, n) for r, t, n in util.sort(l)]
352 return [(t, n) for r, t, n in util.sort(l)]
353
353
354 def nodetags(self, node):
354 def nodetags(self, node):
355 '''return the tags associated with a node'''
355 '''return the tags associated with a node'''
356 if not self.nodetagscache:
356 if not self.nodetagscache:
357 self.nodetagscache = {}
357 self.nodetagscache = {}
358 for t, n in self.tags().iteritems():
358 for t, n in self.tags().iteritems():
359 self.nodetagscache.setdefault(n, []).append(t)
359 self.nodetagscache.setdefault(n, []).append(t)
360 return self.nodetagscache.get(node, [])
360 return self.nodetagscache.get(node, [])
361
361
362 def _branchtags(self, partial, lrev):
362 def _branchtags(self, partial, lrev):
363 # TODO: rename this function?
363 # TODO: rename this function?
364 tiprev = len(self) - 1
364 tiprev = len(self) - 1
365 if lrev != tiprev:
365 if lrev != tiprev:
366 self._updatebranchcache(partial, lrev+1, tiprev+1)
366 self._updatebranchcache(partial, lrev+1, tiprev+1)
367 self._writebranchcache(partial, self.changelog.tip(), tiprev)
367 self._writebranchcache(partial, self.changelog.tip(), tiprev)
368
368
369 return partial
369 return partial
370
370
371 def _branchheads(self):
371 def _branchheads(self):
372 tip = self.changelog.tip()
372 tip = self.changelog.tip()
373 if self.branchcache is not None and self._branchcachetip == tip:
373 if self.branchcache is not None and self._branchcachetip == tip:
374 return self.branchcache
374 return self.branchcache
375
375
376 oldtip = self._branchcachetip
376 oldtip = self._branchcachetip
377 self._branchcachetip = tip
377 self._branchcachetip = tip
378 if self.branchcache is None:
378 if self.branchcache is None:
379 self.branchcache = {} # avoid recursion in changectx
379 self.branchcache = {} # avoid recursion in changectx
380 else:
380 else:
381 self.branchcache.clear() # keep using the same dict
381 self.branchcache.clear() # keep using the same dict
382 if oldtip is None or oldtip not in self.changelog.nodemap:
382 if oldtip is None or oldtip not in self.changelog.nodemap:
383 partial, last, lrev = self._readbranchcache()
383 partial, last, lrev = self._readbranchcache()
384 else:
384 else:
385 lrev = self.changelog.rev(oldtip)
385 lrev = self.changelog.rev(oldtip)
386 partial = self._ubranchcache
386 partial = self._ubranchcache
387
387
388 self._branchtags(partial, lrev)
388 self._branchtags(partial, lrev)
389 # this private cache holds all heads (not just tips)
389 # this private cache holds all heads (not just tips)
390 self._ubranchcache = partial
390 self._ubranchcache = partial
391
391
392 # the branch cache is stored on disk as UTF-8, but in the local
392 # the branch cache is stored on disk as UTF-8, but in the local
393 # charset internally
393 # charset internally
394 for k, v in partial.iteritems():
394 for k, v in partial.iteritems():
395 self.branchcache[util.tolocal(k)] = v
395 self.branchcache[util.tolocal(k)] = v
396 return self.branchcache
396 return self.branchcache
397
397
398
398
399 def branchtags(self):
399 def branchtags(self):
400 '''return a dict where branch names map to the tipmost head of
400 '''return a dict where branch names map to the tipmost head of
401 the branch, open heads come before closed'''
401 the branch, open heads come before closed'''
402 bt = {}
402 bt = {}
403 for bn, heads in self._branchheads().iteritems():
403 for bn, heads in self._branchheads().iteritems():
404 head = None
404 head = None
405 for i in range(len(heads)-1, -1, -1):
405 for i in range(len(heads)-1, -1, -1):
406 h = heads[i]
406 h = heads[i]
407 if 'close' not in self.changelog.read(h)[5]:
407 if 'close' not in self.changelog.read(h)[5]:
408 head = h
408 head = h
409 break
409 break
410 # no open heads were found
410 # no open heads were found
411 if head is None:
411 if head is None:
412 head = heads[-1]
412 head = heads[-1]
413 bt[bn] = head
413 bt[bn] = head
414 return bt
414 return bt
415
415
416
416
417 def _readbranchcache(self):
417 def _readbranchcache(self):
418 partial = {}
418 partial = {}
419 try:
419 try:
420 f = self.opener("branchheads.cache")
420 f = self.opener("branchheads.cache")
421 lines = f.read().split('\n')
421 lines = f.read().split('\n')
422 f.close()
422 f.close()
423 except (IOError, OSError):
423 except (IOError, OSError):
424 return {}, nullid, nullrev
424 return {}, nullid, nullrev
425
425
426 try:
426 try:
427 last, lrev = lines.pop(0).split(" ", 1)
427 last, lrev = lines.pop(0).split(" ", 1)
428 last, lrev = bin(last), int(lrev)
428 last, lrev = bin(last), int(lrev)
429 if lrev >= len(self) or self[lrev].node() != last:
429 if lrev >= len(self) or self[lrev].node() != last:
430 # invalidate the cache
430 # invalidate the cache
431 raise ValueError('invalidating branch cache (tip differs)')
431 raise ValueError('invalidating branch cache (tip differs)')
432 for l in lines:
432 for l in lines:
433 if not l: continue
433 if not l: continue
434 node, label = l.split(" ", 1)
434 node, label = l.split(" ", 1)
435 partial.setdefault(label.strip(), []).append(bin(node))
435 partial.setdefault(label.strip(), []).append(bin(node))
436 except KeyboardInterrupt:
436 except KeyboardInterrupt:
437 raise
437 raise
438 except Exception, inst:
438 except Exception, inst:
439 if self.ui.debugflag:
439 if self.ui.debugflag:
440 self.ui.warn(str(inst), '\n')
440 self.ui.warn(str(inst), '\n')
441 partial, last, lrev = {}, nullid, nullrev
441 partial, last, lrev = {}, nullid, nullrev
442 return partial, last, lrev
442 return partial, last, lrev
443
443
444 def _writebranchcache(self, branches, tip, tiprev):
444 def _writebranchcache(self, branches, tip, tiprev):
445 try:
445 try:
446 f = self.opener("branchheads.cache", "w", atomictemp=True)
446 f = self.opener("branchheads.cache", "w", atomictemp=True)
447 f.write("%s %s\n" % (hex(tip), tiprev))
447 f.write("%s %s\n" % (hex(tip), tiprev))
448 for label, nodes in branches.iteritems():
448 for label, nodes in branches.iteritems():
449 for node in nodes:
449 for node in nodes:
450 f.write("%s %s\n" % (hex(node), label))
450 f.write("%s %s\n" % (hex(node), label))
451 f.rename()
451 f.rename()
452 except (IOError, OSError):
452 except (IOError, OSError):
453 pass
453 pass
454
454
455 def _updatebranchcache(self, partial, start, end):
455 def _updatebranchcache(self, partial, start, end):
456 for r in xrange(start, end):
456 for r in xrange(start, end):
457 c = self[r]
457 c = self[r]
458 b = c.branch()
458 b = c.branch()
459 bheads = partial.setdefault(b, [])
459 bheads = partial.setdefault(b, [])
460 bheads.append(c.node())
460 bheads.append(c.node())
461 for p in c.parents():
461 for p in c.parents():
462 pn = p.node()
462 pn = p.node()
463 if pn in bheads:
463 if pn in bheads:
464 bheads.remove(pn)
464 bheads.remove(pn)
465
465
466 def lookup(self, key):
466 def lookup(self, key):
467 if isinstance(key, int):
467 if isinstance(key, int):
468 return self.changelog.node(key)
468 return self.changelog.node(key)
469 elif key == '.':
469 elif key == '.':
470 return self.dirstate.parents()[0]
470 return self.dirstate.parents()[0]
471 elif key == 'null':
471 elif key == 'null':
472 return nullid
472 return nullid
473 elif key == 'tip':
473 elif key == 'tip':
474 return self.changelog.tip()
474 return self.changelog.tip()
475 n = self.changelog._match(key)
475 n = self.changelog._match(key)
476 if n:
476 if n:
477 return n
477 return n
478 if key in self.tags():
478 if key in self.tags():
479 return self.tags()[key]
479 return self.tags()[key]
480 if key in self.branchtags():
480 if key in self.branchtags():
481 return self.branchtags()[key]
481 return self.branchtags()[key]
482 n = self.changelog._partialmatch(key)
482 n = self.changelog._partialmatch(key)
483 if n:
483 if n:
484 return n
484 return n
485 try:
485 try:
486 if len(key) == 20:
486 if len(key) == 20:
487 key = hex(key)
487 key = hex(key)
488 except:
488 except:
489 pass
489 pass
490 raise error.RepoError(_("unknown revision '%s'") % key)
490 raise error.RepoError(_("unknown revision '%s'") % key)
491
491
492 def local(self):
492 def local(self):
493 return True
493 return True
494
494
495 def join(self, f):
495 def join(self, f):
496 return os.path.join(self.path, f)
496 return os.path.join(self.path, f)
497
497
498 def wjoin(self, f):
498 def wjoin(self, f):
499 return os.path.join(self.root, f)
499 return os.path.join(self.root, f)
500
500
501 def rjoin(self, f):
501 def rjoin(self, f):
502 return os.path.join(self.root, util.pconvert(f))
502 return os.path.join(self.root, util.pconvert(f))
503
503
504 def file(self, f):
504 def file(self, f):
505 if f[0] == '/':
505 if f[0] == '/':
506 f = f[1:]
506 f = f[1:]
507 return filelog.filelog(self.sopener, f)
507 return filelog.filelog(self.sopener, f)
508
508
509 def changectx(self, changeid):
509 def changectx(self, changeid):
510 return self[changeid]
510 return self[changeid]
511
511
512 def parents(self, changeid=None):
512 def parents(self, changeid=None):
513 '''get list of changectxs for parents of changeid'''
513 '''get list of changectxs for parents of changeid'''
514 return self[changeid].parents()
514 return self[changeid].parents()
515
515
516 def filectx(self, path, changeid=None, fileid=None):
516 def filectx(self, path, changeid=None, fileid=None):
517 """changeid can be a changeset revision, node, or tag.
517 """changeid can be a changeset revision, node, or tag.
518 fileid can be a file revision or node."""
518 fileid can be a file revision or node."""
519 return context.filectx(self, path, changeid, fileid)
519 return context.filectx(self, path, changeid, fileid)
520
520
521 def getcwd(self):
521 def getcwd(self):
522 return self.dirstate.getcwd()
522 return self.dirstate.getcwd()
523
523
524 def pathto(self, f, cwd=None):
524 def pathto(self, f, cwd=None):
525 return self.dirstate.pathto(f, cwd)
525 return self.dirstate.pathto(f, cwd)
526
526
527 def wfile(self, f, mode='r'):
527 def wfile(self, f, mode='r'):
528 return self.wopener(f, mode)
528 return self.wopener(f, mode)
529
529
530 def _link(self, f):
530 def _link(self, f):
531 return os.path.islink(self.wjoin(f))
531 return os.path.islink(self.wjoin(f))
532
532
533 def _filter(self, filter, filename, data):
533 def _filter(self, filter, filename, data):
534 if filter not in self.filterpats:
534 if filter not in self.filterpats:
535 l = []
535 l = []
536 for pat, cmd in self.ui.configitems(filter):
536 for pat, cmd in self.ui.configitems(filter):
537 if cmd == '!':
537 if cmd == '!':
538 continue
538 continue
539 mf = util.matcher(self.root, "", [pat], [], [])[1]
539 mf = util.matcher(self.root, "", [pat], [], [])[1]
540 fn = None
540 fn = None
541 params = cmd
541 params = cmd
542 for name, filterfn in self._datafilters.iteritems():
542 for name, filterfn in self._datafilters.iteritems():
543 if cmd.startswith(name):
543 if cmd.startswith(name):
544 fn = filterfn
544 fn = filterfn
545 params = cmd[len(name):].lstrip()
545 params = cmd[len(name):].lstrip()
546 break
546 break
547 if not fn:
547 if not fn:
548 fn = lambda s, c, **kwargs: util.filter(s, c)
548 fn = lambda s, c, **kwargs: util.filter(s, c)
549 # Wrap old filters not supporting keyword arguments
549 # Wrap old filters not supporting keyword arguments
550 if not inspect.getargspec(fn)[2]:
550 if not inspect.getargspec(fn)[2]:
551 oldfn = fn
551 oldfn = fn
552 fn = lambda s, c, **kwargs: oldfn(s, c)
552 fn = lambda s, c, **kwargs: oldfn(s, c)
553 l.append((mf, fn, params))
553 l.append((mf, fn, params))
554 self.filterpats[filter] = l
554 self.filterpats[filter] = l
555
555
556 for mf, fn, cmd in self.filterpats[filter]:
556 for mf, fn, cmd in self.filterpats[filter]:
557 if mf(filename):
557 if mf(filename):
558 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
558 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
559 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
559 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
560 break
560 break
561
561
562 return data
562 return data
563
563
564 def adddatafilter(self, name, filter):
564 def adddatafilter(self, name, filter):
565 self._datafilters[name] = filter
565 self._datafilters[name] = filter
566
566
567 def wread(self, filename):
567 def wread(self, filename):
568 if self._link(filename):
568 if self._link(filename):
569 data = os.readlink(self.wjoin(filename))
569 data = os.readlink(self.wjoin(filename))
570 else:
570 else:
571 data = self.wopener(filename, 'r').read()
571 data = self.wopener(filename, 'r').read()
572 return self._filter("encode", filename, data)
572 return self._filter("encode", filename, data)
573
573
574 def wwrite(self, filename, data, flags):
574 def wwrite(self, filename, data, flags):
575 data = self._filter("decode", filename, data)
575 data = self._filter("decode", filename, data)
576 try:
576 try:
577 os.unlink(self.wjoin(filename))
577 os.unlink(self.wjoin(filename))
578 except OSError:
578 except OSError:
579 pass
579 pass
580 if 'l' in flags:
580 if 'l' in flags:
581 self.wopener.symlink(data, filename)
581 self.wopener.symlink(data, filename)
582 else:
582 else:
583 self.wopener(filename, 'w').write(data)
583 self.wopener(filename, 'w').write(data)
584 if 'x' in flags:
584 if 'x' in flags:
585 util.set_flags(self.wjoin(filename), False, True)
585 util.set_flags(self.wjoin(filename), False, True)
586
586
587 def wwritedata(self, filename, data):
587 def wwritedata(self, filename, data):
588 return self._filter("decode", filename, data)
588 return self._filter("decode", filename, data)
589
589
590 def transaction(self):
590 def transaction(self):
591 if self._transref and self._transref():
591 if self._transref and self._transref():
592 return self._transref().nest()
592 return self._transref().nest()
593
593
594 # abort here if the journal already exists
594 # abort here if the journal already exists
595 if os.path.exists(self.sjoin("journal")):
595 if os.path.exists(self.sjoin("journal")):
596 raise error.RepoError(_("journal already exists - run hg recover"))
596 raise error.RepoError(_("journal already exists - run hg recover"))
597
597
598 # save dirstate for rollback
598 # save dirstate for rollback
599 try:
599 try:
600 ds = self.opener("dirstate").read()
600 ds = self.opener("dirstate").read()
601 except IOError:
601 except IOError:
602 ds = ""
602 ds = ""
603 self.opener("journal.dirstate", "w").write(ds)
603 self.opener("journal.dirstate", "w").write(ds)
604 self.opener("journal.branch", "w").write(self.dirstate.branch())
604 self.opener("journal.branch", "w").write(self.dirstate.branch())
605
605
606 renames = [(self.sjoin("journal"), self.sjoin("undo")),
606 renames = [(self.sjoin("journal"), self.sjoin("undo")),
607 (self.join("journal.dirstate"), self.join("undo.dirstate")),
607 (self.join("journal.dirstate"), self.join("undo.dirstate")),
608 (self.join("journal.branch"), self.join("undo.branch"))]
608 (self.join("journal.branch"), self.join("undo.branch"))]
609 tr = transaction.transaction(self.ui.warn, self.sopener,
609 tr = transaction.transaction(self.ui.warn, self.sopener,
610 self.sjoin("journal"),
610 self.sjoin("journal"),
611 aftertrans(renames),
611 aftertrans(renames),
612 self.store.createmode)
612 self.store.createmode)
613 self._transref = weakref.ref(tr)
613 self._transref = weakref.ref(tr)
614 return tr
614 return tr
615
615
616 def recover(self):
616 def recover(self):
617 l = self.lock()
617 l = self.lock()
618 try:
618 try:
619 if os.path.exists(self.sjoin("journal")):
619 if os.path.exists(self.sjoin("journal")):
620 self.ui.status(_("rolling back interrupted transaction\n"))
620 self.ui.status(_("rolling back interrupted transaction\n"))
621 transaction.rollback(self.sopener, self.sjoin("journal"))
621 transaction.rollback(self.sopener, self.sjoin("journal"))
622 self.invalidate()
622 self.invalidate()
623 return True
623 return True
624 else:
624 else:
625 self.ui.warn(_("no interrupted transaction available\n"))
625 self.ui.warn(_("no interrupted transaction available\n"))
626 return False
626 return False
627 finally:
627 finally:
628 del l
628 del l
629
629
630 def rollback(self):
630 def rollback(self):
631 wlock = lock = None
631 wlock = lock = None
632 try:
632 try:
633 wlock = self.wlock()
633 wlock = self.wlock()
634 lock = self.lock()
634 lock = self.lock()
635 if os.path.exists(self.sjoin("undo")):
635 if os.path.exists(self.sjoin("undo")):
636 self.ui.status(_("rolling back last transaction\n"))
636 self.ui.status(_("rolling back last transaction\n"))
637 transaction.rollback(self.sopener, self.sjoin("undo"))
637 transaction.rollback(self.sopener, self.sjoin("undo"))
638 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
638 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
639 try:
639 try:
640 branch = self.opener("undo.branch").read()
640 branch = self.opener("undo.branch").read()
641 self.dirstate.setbranch(branch)
641 self.dirstate.setbranch(branch)
642 except IOError:
642 except IOError:
643 self.ui.warn(_("Named branch could not be reset, "
643 self.ui.warn(_("Named branch could not be reset, "
644 "current branch still is: %s\n")
644 "current branch still is: %s\n")
645 % util.tolocal(self.dirstate.branch()))
645 % util.tolocal(self.dirstate.branch()))
646 self.invalidate()
646 self.invalidate()
647 self.dirstate.invalidate()
647 self.dirstate.invalidate()
648 else:
648 else:
649 self.ui.warn(_("no rollback information available\n"))
649 self.ui.warn(_("no rollback information available\n"))
650 finally:
650 finally:
651 del lock, wlock
651 del lock, wlock
652
652
653 def invalidate(self):
653 def invalidate(self):
654 for a in "changelog manifest".split():
654 for a in "changelog manifest".split():
655 if a in self.__dict__:
655 if a in self.__dict__:
656 delattr(self, a)
656 delattr(self, a)
657 self.tagscache = None
657 self.tagscache = None
658 self._tagstypecache = None
658 self._tagstypecache = None
659 self.nodetagscache = None
659 self.nodetagscache = None
660 self.branchcache = None
660 self.branchcache = None
661 self._ubranchcache = None
661 self._ubranchcache = None
662 self._branchcachetip = None
662 self._branchcachetip = None
663
663
664 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
664 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
665 try:
665 try:
666 l = lock.lock(lockname, 0, releasefn, desc=desc)
666 l = lock.lock(lockname, 0, releasefn, desc=desc)
667 except error.LockHeld, inst:
667 except error.LockHeld, inst:
668 if not wait:
668 if not wait:
669 raise
669 raise
670 self.ui.warn(_("waiting for lock on %s held by %r\n") %
670 self.ui.warn(_("waiting for lock on %s held by %r\n") %
671 (desc, inst.locker))
671 (desc, inst.locker))
672 # default to 600 seconds timeout
672 # default to 600 seconds timeout
673 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
673 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
674 releasefn, desc=desc)
674 releasefn, desc=desc)
675 if acquirefn:
675 if acquirefn:
676 acquirefn()
676 acquirefn()
677 return l
677 return l
678
678
679 def lock(self, wait=True):
679 def lock(self, wait=True):
680 if self._lockref and self._lockref():
680 if self._lockref and self._lockref():
681 return self._lockref()
681 return self._lockref()
682
682
683 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
683 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
684 _('repository %s') % self.origroot)
684 _('repository %s') % self.origroot)
685 self._lockref = weakref.ref(l)
685 self._lockref = weakref.ref(l)
686 return l
686 return l
687
687
688 def wlock(self, wait=True):
688 def wlock(self, wait=True):
689 if self._wlockref and self._wlockref():
689 if self._wlockref and self._wlockref():
690 return self._wlockref()
690 return self._wlockref()
691
691
692 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
692 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
693 self.dirstate.invalidate, _('working directory of %s') %
693 self.dirstate.invalidate, _('working directory of %s') %
694 self.origroot)
694 self.origroot)
695 self._wlockref = weakref.ref(l)
695 self._wlockref = weakref.ref(l)
696 return l
696 return l
697
697
698 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
698 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
699 """
699 """
700 commit an individual file as part of a larger transaction
700 commit an individual file as part of a larger transaction
701 """
701 """
702
702
703 fn = fctx.path()
703 fn = fctx.path()
704 t = fctx.data()
704 t = fctx.data()
705 fl = self.file(fn)
705 fl = self.file(fn)
706 fp1 = manifest1.get(fn, nullid)
706 fp1 = manifest1.get(fn, nullid)
707 fp2 = manifest2.get(fn, nullid)
707 fp2 = manifest2.get(fn, nullid)
708
708
709 meta = {}
709 meta = {}
710 cp = fctx.renamed()
710 cp = fctx.renamed()
711 if cp and cp[0] != fn:
711 if cp and cp[0] != fn:
712 # Mark the new revision of this file as a copy of another
712 # Mark the new revision of this file as a copy of another
713 # file. This copy data will effectively act as a parent
713 # file. This copy data will effectively act as a parent
714 # of this new revision. If this is a merge, the first
714 # of this new revision. If this is a merge, the first
715 # parent will be the nullid (meaning "look up the copy data")
715 # parent will be the nullid (meaning "look up the copy data")
716 # and the second one will be the other parent. For example:
716 # and the second one will be the other parent. For example:
717 #
717 #
718 # 0 --- 1 --- 3 rev1 changes file foo
718 # 0 --- 1 --- 3 rev1 changes file foo
719 # \ / rev2 renames foo to bar and changes it
719 # \ / rev2 renames foo to bar and changes it
720 # \- 2 -/ rev3 should have bar with all changes and
720 # \- 2 -/ rev3 should have bar with all changes and
721 # should record that bar descends from
721 # should record that bar descends from
722 # bar in rev2 and foo in rev1
722 # bar in rev2 and foo in rev1
723 #
723 #
724 # this allows this merge to succeed:
724 # this allows this merge to succeed:
725 #
725 #
726 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
726 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
727 # \ / merging rev3 and rev4 should use bar@rev2
727 # \ / merging rev3 and rev4 should use bar@rev2
728 # \- 2 --- 4 as the merge base
728 # \- 2 --- 4 as the merge base
729 #
729 #
730
730
731 cf = cp[0]
731 cf = cp[0]
732 cr = manifest1.get(cf)
732 cr = manifest1.get(cf)
733 nfp = fp2
733 nfp = fp2
734
734
735 if manifest2: # branch merge
735 if manifest2: # branch merge
736 if fp2 == nullid or cr is None: # copied on remote side
736 if fp2 == nullid or cr is None: # copied on remote side
737 if cf in manifest2:
737 if cf in manifest2:
738 cr = manifest2[cf]
738 cr = manifest2[cf]
739 nfp = fp1
739 nfp = fp1
740
740
741 # find source in nearest ancestor if we've lost track
741 # find source in nearest ancestor if we've lost track
742 if not cr:
742 if not cr:
743 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
743 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
744 (fn, cf))
744 (fn, cf))
745 for a in self['.'].ancestors():
745 for a in self['.'].ancestors():
746 if cf in a:
746 if cf in a:
747 cr = a[cf].filenode()
747 cr = a[cf].filenode()
748 break
748 break
749
749
750 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
750 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
751 meta["copy"] = cf
751 meta["copy"] = cf
752 meta["copyrev"] = hex(cr)
752 meta["copyrev"] = hex(cr)
753 fp1, fp2 = nullid, nfp
753 fp1, fp2 = nullid, nfp
754 elif fp2 != nullid:
754 elif fp2 != nullid:
755 # is one parent an ancestor of the other?
755 # is one parent an ancestor of the other?
756 fpa = fl.ancestor(fp1, fp2)
756 fpa = fl.ancestor(fp1, fp2)
757 if fpa == fp1:
757 if fpa == fp1:
758 fp1, fp2 = fp2, nullid
758 fp1, fp2 = fp2, nullid
759 elif fpa == fp2:
759 elif fpa == fp2:
760 fp2 = nullid
760 fp2 = nullid
761
761
762 # is the file unmodified from the parent? report existing entry
762 # is the file unmodified from the parent? report existing entry
763 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
763 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
764 return fp1
764 return fp1
765
765
766 changelist.append(fn)
766 changelist.append(fn)
767 return fl.add(t, meta, tr, linkrev, fp1, fp2)
767 return fl.add(t, meta, tr, linkrev, fp1, fp2)
768
768
769 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
769 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
770 if p1 is None:
770 if p1 is None:
771 p1, p2 = self.dirstate.parents()
771 p1, p2 = self.dirstate.parents()
772 return self.commit(files=files, text=text, user=user, date=date,
772 return self.commit(files=files, text=text, user=user, date=date,
773 p1=p1, p2=p2, extra=extra, empty_ok=True)
773 p1=p1, p2=p2, extra=extra, empty_ok=True)
774
774
775 def commit(self, files=None, text="", user=None, date=None,
775 def commit(self, files=None, text="", user=None, date=None,
776 match=None, force=False, force_editor=False,
776 match=None, force=False, force_editor=False,
777 p1=None, p2=None, extra={}, empty_ok=False):
777 p1=None, p2=None, extra={}, empty_ok=False):
778 wlock = lock = None
778 wlock = lock = None
779 if extra.get("close"):
779 if extra.get("close"):
780 force = True
780 force = True
781 if files:
781 if files:
782 files = util.unique(files)
782 files = util.unique(files)
783 try:
783 try:
784 wlock = self.wlock()
784 wlock = self.wlock()
785 lock = self.lock()
785 lock = self.lock()
786 use_dirstate = (p1 is None) # not rawcommit
786 use_dirstate = (p1 is None) # not rawcommit
787
787
788 if use_dirstate:
788 if use_dirstate:
789 p1, p2 = self.dirstate.parents()
789 p1, p2 = self.dirstate.parents()
790 update_dirstate = True
790 update_dirstate = True
791
791
792 if (not force and p2 != nullid and
792 if (not force and p2 != nullid and
793 (match and (match.files() or match.anypats()))):
793 (match and (match.files() or match.anypats()))):
794 raise util.Abort(_('cannot partially commit a merge '
794 raise util.Abort(_('cannot partially commit a merge '
795 '(do not specify files or patterns)'))
795 '(do not specify files or patterns)'))
796
796
797 if files:
797 if files:
798 modified, removed = [], []
798 modified, removed = [], []
799 for f in files:
799 for f in files:
800 s = self.dirstate[f]
800 s = self.dirstate[f]
801 if s in 'nma':
801 if s in 'nma':
802 modified.append(f)
802 modified.append(f)
803 elif s == 'r':
803 elif s == 'r':
804 removed.append(f)
804 removed.append(f)
805 else:
805 else:
806 self.ui.warn(_("%s not tracked!\n") % f)
806 self.ui.warn(_("%s not tracked!\n") % f)
807 changes = [modified, [], removed, [], []]
807 changes = [modified, [], removed, [], []]
808 else:
808 else:
809 changes = self.status(match=match)
809 changes = self.status(match=match)
810 else:
810 else:
811 p1, p2 = p1, p2 or nullid
811 p1, p2 = p1, p2 or nullid
812 update_dirstate = (self.dirstate.parents()[0] == p1)
812 update_dirstate = (self.dirstate.parents()[0] == p1)
813 changes = [files, [], [], [], []]
813 changes = [files, [], [], [], []]
814
814
815 ms = merge_.mergestate(self)
815 ms = merge_.mergestate(self)
816 for f in changes[0]:
816 for f in changes[0]:
817 if f in ms and ms[f] == 'u':
817 if f in ms and ms[f] == 'u':
818 raise util.Abort(_("unresolved merge conflicts "
818 raise util.Abort(_("unresolved merge conflicts "
819 "(see hg resolve)"))
819 "(see hg resolve)"))
820 wctx = context.workingctx(self, (p1, p2), text, user, date,
820 wctx = context.workingctx(self, (p1, p2), text, user, date,
821 extra, changes)
821 extra, changes)
822 return self._commitctx(wctx, force, force_editor, empty_ok,
822 return self._commitctx(wctx, force, force_editor, empty_ok,
823 use_dirstate, update_dirstate)
823 use_dirstate, update_dirstate)
824 finally:
824 finally:
825 del lock, wlock
825 del lock, wlock
826
826
827 def commitctx(self, ctx):
827 def commitctx(self, ctx):
828 """Add a new revision to current repository.
828 """Add a new revision to current repository.
829
829
830 Revision information is passed in the context.memctx argument.
830 Revision information is passed in the context.memctx argument.
831 commitctx() does not touch the working directory.
831 commitctx() does not touch the working directory.
832 """
832 """
833 wlock = lock = None
833 wlock = lock = None
834 try:
834 try:
835 wlock = self.wlock()
835 wlock = self.wlock()
836 lock = self.lock()
836 lock = self.lock()
837 return self._commitctx(ctx, force=True, force_editor=False,
837 return self._commitctx(ctx, force=True, force_editor=False,
838 empty_ok=True, use_dirstate=False,
838 empty_ok=True, use_dirstate=False,
839 update_dirstate=False)
839 update_dirstate=False)
840 finally:
840 finally:
841 del lock, wlock
841 del lock, wlock
842
842
843 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
843 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
844 use_dirstate=True, update_dirstate=True):
844 use_dirstate=True, update_dirstate=True):
845 tr = None
845 tr = None
846 valid = 0 # don't save the dirstate if this isn't set
846 valid = 0 # don't save the dirstate if this isn't set
847 try:
847 try:
848 commit = util.sort(wctx.modified() + wctx.added())
848 commit = util.sort(wctx.modified() + wctx.added())
849 remove = wctx.removed()
849 remove = wctx.removed()
850 extra = wctx.extra().copy()
850 extra = wctx.extra().copy()
851 branchname = extra['branch']
851 branchname = extra['branch']
852 user = wctx.user()
852 user = wctx.user()
853 text = wctx.description()
853 text = wctx.description()
854
854
855 if branchname == 'default' and extra.get('close'):
856 raise util.Abort(_('closing the default branch is invalid'))
857 p1, p2 = [p.node() for p in wctx.parents()]
855 p1, p2 = [p.node() for p in wctx.parents()]
858 c1 = self.changelog.read(p1)
856 c1 = self.changelog.read(p1)
859 c2 = self.changelog.read(p2)
857 c2 = self.changelog.read(p2)
860 m1 = self.manifest.read(c1[0]).copy()
858 m1 = self.manifest.read(c1[0]).copy()
861 m2 = self.manifest.read(c2[0])
859 m2 = self.manifest.read(c2[0])
862
860
863 if use_dirstate:
861 if use_dirstate:
864 oldname = c1[5].get("branch") # stored in UTF-8
862 oldname = c1[5].get("branch") # stored in UTF-8
865 if (not commit and not remove and not force and p2 == nullid
863 if (not commit and not remove and not force and p2 == nullid
866 and branchname == oldname):
864 and branchname == oldname):
867 self.ui.status(_("nothing changed\n"))
865 self.ui.status(_("nothing changed\n"))
868 return None
866 return None
869
867
870 xp1 = hex(p1)
868 xp1 = hex(p1)
871 if p2 == nullid: xp2 = ''
869 if p2 == nullid: xp2 = ''
872 else: xp2 = hex(p2)
870 else: xp2 = hex(p2)
873
871
874 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
872 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
875
873
876 tr = self.transaction()
874 tr = self.transaction()
877 trp = weakref.proxy(tr)
875 trp = weakref.proxy(tr)
878
876
879 # check in files
877 # check in files
880 new = {}
878 new = {}
881 changed = []
879 changed = []
882 linkrev = len(self)
880 linkrev = len(self)
883 for f in commit:
881 for f in commit:
884 self.ui.note(f + "\n")
882 self.ui.note(f + "\n")
885 try:
883 try:
886 fctx = wctx.filectx(f)
884 fctx = wctx.filectx(f)
887 newflags = fctx.flags()
885 newflags = fctx.flags()
888 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
886 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
889 if ((not changed or changed[-1] != f) and
887 if ((not changed or changed[-1] != f) and
890 m2.get(f) != new[f]):
888 m2.get(f) != new[f]):
891 # mention the file in the changelog if some
889 # mention the file in the changelog if some
892 # flag changed, even if there was no content
890 # flag changed, even if there was no content
893 # change.
891 # change.
894 if m1.flags(f) != newflags:
892 if m1.flags(f) != newflags:
895 changed.append(f)
893 changed.append(f)
896 m1.set(f, newflags)
894 m1.set(f, newflags)
897 if use_dirstate:
895 if use_dirstate:
898 self.dirstate.normal(f)
896 self.dirstate.normal(f)
899
897
900 except (OSError, IOError):
898 except (OSError, IOError):
901 if use_dirstate:
899 if use_dirstate:
902 self.ui.warn(_("trouble committing %s!\n") % f)
900 self.ui.warn(_("trouble committing %s!\n") % f)
903 raise
901 raise
904 else:
902 else:
905 remove.append(f)
903 remove.append(f)
906
904
907 updated, added = [], []
905 updated, added = [], []
908 for f in util.sort(changed):
906 for f in util.sort(changed):
909 if f in m1 or f in m2:
907 if f in m1 or f in m2:
910 updated.append(f)
908 updated.append(f)
911 else:
909 else:
912 added.append(f)
910 added.append(f)
913
911
914 # update manifest
912 # update manifest
915 m1.update(new)
913 m1.update(new)
916 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
914 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
917 removed1 = []
915 removed1 = []
918
916
919 for f in removed:
917 for f in removed:
920 if f in m1:
918 if f in m1:
921 del m1[f]
919 del m1[f]
922 removed1.append(f)
920 removed1.append(f)
923 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
921 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
924 (new, removed1))
922 (new, removed1))
925
923
926 # add changeset
924 # add changeset
927 if (not empty_ok and not text) or force_editor:
925 if (not empty_ok and not text) or force_editor:
928 edittext = []
926 edittext = []
929 if text:
927 if text:
930 edittext.append(text)
928 edittext.append(text)
931 edittext.append("")
929 edittext.append("")
932 edittext.append("") # Empty line between message and comments.
930 edittext.append("") # Empty line between message and comments.
933 edittext.append(_("HG: Enter commit message."
931 edittext.append(_("HG: Enter commit message."
934 " Lines beginning with 'HG:' are removed."))
932 " Lines beginning with 'HG:' are removed."))
935 edittext.append("HG: --")
933 edittext.append("HG: --")
936 edittext.append("HG: user: %s" % user)
934 edittext.append("HG: user: %s" % user)
937 if p2 != nullid:
935 if p2 != nullid:
938 edittext.append("HG: branch merge")
936 edittext.append("HG: branch merge")
939 if branchname:
937 if branchname:
940 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
938 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
941 edittext.extend(["HG: added %s" % f for f in added])
939 edittext.extend(["HG: added %s" % f for f in added])
942 edittext.extend(["HG: changed %s" % f for f in updated])
940 edittext.extend(["HG: changed %s" % f for f in updated])
943 edittext.extend(["HG: removed %s" % f for f in removed])
941 edittext.extend(["HG: removed %s" % f for f in removed])
944 if not added and not updated and not removed:
942 if not added and not updated and not removed:
945 edittext.append("HG: no files changed")
943 edittext.append("HG: no files changed")
946 edittext.append("")
944 edittext.append("")
947 # run editor in the repository root
945 # run editor in the repository root
948 olddir = os.getcwd()
946 olddir = os.getcwd()
949 os.chdir(self.root)
947 os.chdir(self.root)
950 text = self.ui.edit("\n".join(edittext), user)
948 text = self.ui.edit("\n".join(edittext), user)
951 os.chdir(olddir)
949 os.chdir(olddir)
952
950
953 lines = [line.rstrip() for line in text.rstrip().splitlines()]
951 lines = [line.rstrip() for line in text.rstrip().splitlines()]
954 while lines and not lines[0]:
952 while lines and not lines[0]:
955 del lines[0]
953 del lines[0]
956 if not lines and use_dirstate:
954 if not lines and use_dirstate:
957 raise util.Abort(_("empty commit message"))
955 raise util.Abort(_("empty commit message"))
958 text = '\n'.join(lines)
956 text = '\n'.join(lines)
959
957
960 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
958 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
961 user, wctx.date(), extra)
959 user, wctx.date(), extra)
962 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
960 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
963 parent2=xp2)
961 parent2=xp2)
964 tr.close()
962 tr.close()
965
963
966 if self.branchcache:
964 if self.branchcache:
967 self.branchtags()
965 self.branchtags()
968
966
969 if use_dirstate or update_dirstate:
967 if use_dirstate or update_dirstate:
970 self.dirstate.setparents(n)
968 self.dirstate.setparents(n)
971 if use_dirstate:
969 if use_dirstate:
972 for f in removed:
970 for f in removed:
973 self.dirstate.forget(f)
971 self.dirstate.forget(f)
974 valid = 1 # our dirstate updates are complete
972 valid = 1 # our dirstate updates are complete
975
973
976 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
974 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
977 return n
975 return n
978 finally:
976 finally:
979 if not valid: # don't save our updated dirstate
977 if not valid: # don't save our updated dirstate
980 self.dirstate.invalidate()
978 self.dirstate.invalidate()
981 del tr
979 del tr
982
980
983 def walk(self, match, node=None):
981 def walk(self, match, node=None):
984 '''
982 '''
985 walk recursively through the directory tree or a given
983 walk recursively through the directory tree or a given
986 changeset, finding all files matched by the match
984 changeset, finding all files matched by the match
987 function
985 function
988 '''
986 '''
989 return self[node].walk(match)
987 return self[node].walk(match)
990
988
991 def status(self, node1='.', node2=None, match=None,
989 def status(self, node1='.', node2=None, match=None,
992 ignored=False, clean=False, unknown=False):
990 ignored=False, clean=False, unknown=False):
993 """return status of files between two nodes or node and working directory
991 """return status of files between two nodes or node and working directory
994
992
995 If node1 is None, use the first dirstate parent instead.
993 If node1 is None, use the first dirstate parent instead.
996 If node2 is None, compare node1 with working directory.
994 If node2 is None, compare node1 with working directory.
997 """
995 """
998
996
999 def mfmatches(ctx):
997 def mfmatches(ctx):
1000 mf = ctx.manifest().copy()
998 mf = ctx.manifest().copy()
1001 for fn in mf.keys():
999 for fn in mf.keys():
1002 if not match(fn):
1000 if not match(fn):
1003 del mf[fn]
1001 del mf[fn]
1004 return mf
1002 return mf
1005
1003
1006 if isinstance(node1, context.changectx):
1004 if isinstance(node1, context.changectx):
1007 ctx1 = node1
1005 ctx1 = node1
1008 else:
1006 else:
1009 ctx1 = self[node1]
1007 ctx1 = self[node1]
1010 if isinstance(node2, context.changectx):
1008 if isinstance(node2, context.changectx):
1011 ctx2 = node2
1009 ctx2 = node2
1012 else:
1010 else:
1013 ctx2 = self[node2]
1011 ctx2 = self[node2]
1014
1012
1015 working = ctx2.rev() is None
1013 working = ctx2.rev() is None
1016 parentworking = working and ctx1 == self['.']
1014 parentworking = working and ctx1 == self['.']
1017 match = match or match_.always(self.root, self.getcwd())
1015 match = match or match_.always(self.root, self.getcwd())
1018 listignored, listclean, listunknown = ignored, clean, unknown
1016 listignored, listclean, listunknown = ignored, clean, unknown
1019
1017
1020 # load earliest manifest first for caching reasons
1018 # load earliest manifest first for caching reasons
1021 if not working and ctx2.rev() < ctx1.rev():
1019 if not working and ctx2.rev() < ctx1.rev():
1022 ctx2.manifest()
1020 ctx2.manifest()
1023
1021
1024 if not parentworking:
1022 if not parentworking:
1025 def bad(f, msg):
1023 def bad(f, msg):
1026 if f not in ctx1:
1024 if f not in ctx1:
1027 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1025 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1028 return False
1026 return False
1029 match.bad = bad
1027 match.bad = bad
1030
1028
1031 if working: # we need to scan the working dir
1029 if working: # we need to scan the working dir
1032 s = self.dirstate.status(match, listignored, listclean, listunknown)
1030 s = self.dirstate.status(match, listignored, listclean, listunknown)
1033 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1031 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1034
1032
1035 # check for any possibly clean files
1033 # check for any possibly clean files
1036 if parentworking and cmp:
1034 if parentworking and cmp:
1037 fixup = []
1035 fixup = []
1038 # do a full compare of any files that might have changed
1036 # do a full compare of any files that might have changed
1039 for f in cmp:
1037 for f in cmp:
1040 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1038 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1041 or ctx1[f].cmp(ctx2[f].data())):
1039 or ctx1[f].cmp(ctx2[f].data())):
1042 modified.append(f)
1040 modified.append(f)
1043 else:
1041 else:
1044 fixup.append(f)
1042 fixup.append(f)
1045
1043
1046 if listclean:
1044 if listclean:
1047 clean += fixup
1045 clean += fixup
1048
1046
1049 # update dirstate for files that are actually clean
1047 # update dirstate for files that are actually clean
1050 if fixup:
1048 if fixup:
1051 wlock = None
1049 wlock = None
1052 try:
1050 try:
1053 try:
1051 try:
1054 wlock = self.wlock(False)
1052 wlock = self.wlock(False)
1055 for f in fixup:
1053 for f in fixup:
1056 self.dirstate.normal(f)
1054 self.dirstate.normal(f)
1057 except lock.LockError:
1055 except lock.LockError:
1058 pass
1056 pass
1059 finally:
1057 finally:
1060 del wlock
1058 del wlock
1061
1059
1062 if not parentworking:
1060 if not parentworking:
1063 mf1 = mfmatches(ctx1)
1061 mf1 = mfmatches(ctx1)
1064 if working:
1062 if working:
1065 # we are comparing working dir against non-parent
1063 # we are comparing working dir against non-parent
1066 # generate a pseudo-manifest for the working dir
1064 # generate a pseudo-manifest for the working dir
1067 mf2 = mfmatches(self['.'])
1065 mf2 = mfmatches(self['.'])
1068 for f in cmp + modified + added:
1066 for f in cmp + modified + added:
1069 mf2[f] = None
1067 mf2[f] = None
1070 mf2.set(f, ctx2.flags(f))
1068 mf2.set(f, ctx2.flags(f))
1071 for f in removed:
1069 for f in removed:
1072 if f in mf2:
1070 if f in mf2:
1073 del mf2[f]
1071 del mf2[f]
1074 else:
1072 else:
1075 # we are comparing two revisions
1073 # we are comparing two revisions
1076 deleted, unknown, ignored = [], [], []
1074 deleted, unknown, ignored = [], [], []
1077 mf2 = mfmatches(ctx2)
1075 mf2 = mfmatches(ctx2)
1078
1076
1079 modified, added, clean = [], [], []
1077 modified, added, clean = [], [], []
1080 for fn in mf2:
1078 for fn in mf2:
1081 if fn in mf1:
1079 if fn in mf1:
1082 if (mf1.flags(fn) != mf2.flags(fn) or
1080 if (mf1.flags(fn) != mf2.flags(fn) or
1083 (mf1[fn] != mf2[fn] and
1081 (mf1[fn] != mf2[fn] and
1084 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1082 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1085 modified.append(fn)
1083 modified.append(fn)
1086 elif listclean:
1084 elif listclean:
1087 clean.append(fn)
1085 clean.append(fn)
1088 del mf1[fn]
1086 del mf1[fn]
1089 else:
1087 else:
1090 added.append(fn)
1088 added.append(fn)
1091 removed = mf1.keys()
1089 removed = mf1.keys()
1092
1090
1093 r = modified, added, removed, deleted, unknown, ignored, clean
1091 r = modified, added, removed, deleted, unknown, ignored, clean
1094 [l.sort() for l in r]
1092 [l.sort() for l in r]
1095 return r
1093 return r
1096
1094
def add(self, list):
    """Schedule the given working-directory files for tracking.

    Returns the list of repo-relative names that were rejected
    (missing from disk or of an unsupported type).  Oversized files
    only produce a warning; they are still added.
    """
    wlock = self.wlock()
    try:
        rejected = []
        for f in list:
            p = self.wjoin(f)
            try:
                st = os.lstat(p)
            except OSError:
                # was a bare 'except:'; only a stat failure means the
                # file is absent -- don't swallow unrelated errors
                self.ui.warn(_("%s does not exist!\n") % f)
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                # warn but do not reject: large files are legal, just slow
                self.ui.warn(_("%s: files over 10MB may cause memory and"
                               " performance problems\n"
                               "(use 'hg revert %s' to unadd the file)\n")
                             % (f, f))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
                # report the repo-relative name for consistency with the
                # missing-file branch above (previously appended the
                # absolute path p)
                rejected.append(f)
            elif self.dirstate[f] in 'amn':
                self.ui.warn(_("%s already tracked!\n") % f)
            elif self.dirstate[f] == 'r':
                # previously removed: resurrect instead of re-adding
                self.dirstate.normallookup(f)
            else:
                self.dirstate.add(f)
        return rejected
    finally:
        del wlock
1127
1125
def forget(self, list):
    """Undo a pending add: drop the 'a' mark from each given file.

    Files not in the 'added' state are left alone with a warning.
    """
    wlock = self.wlock()
    try:
        for fn in list:
            if self.dirstate[fn] == 'a':
                self.dirstate.forget(fn)
            else:
                self.ui.warn(_("%s not added!\n") % fn)
    finally:
        del wlock
1138
1136
1139 def remove(self, list, unlink=False):
1137 def remove(self, list, unlink=False):
1140 wlock = None
1138 wlock = None
1141 try:
1139 try:
1142 if unlink:
1140 if unlink:
1143 for f in list:
1141 for f in list:
1144 try:
1142 try:
1145 util.unlink(self.wjoin(f))
1143 util.unlink(self.wjoin(f))
1146 except OSError, inst:
1144 except OSError, inst:
1147 if inst.errno != errno.ENOENT:
1145 if inst.errno != errno.ENOENT:
1148 raise
1146 raise
1149 wlock = self.wlock()
1147 wlock = self.wlock()
1150 for f in list:
1148 for f in list:
1151 if unlink and os.path.exists(self.wjoin(f)):
1149 if unlink and os.path.exists(self.wjoin(f)):
1152 self.ui.warn(_("%s still exists!\n") % f)
1150 self.ui.warn(_("%s still exists!\n") % f)
1153 elif self.dirstate[f] == 'a':
1151 elif self.dirstate[f] == 'a':
1154 self.dirstate.forget(f)
1152 self.dirstate.forget(f)
1155 elif f not in self.dirstate:
1153 elif f not in self.dirstate:
1156 self.ui.warn(_("%s not tracked!\n") % f)
1154 self.ui.warn(_("%s not tracked!\n") % f)
1157 else:
1155 else:
1158 self.dirstate.remove(f)
1156 self.dirstate.remove(f)
1159 finally:
1157 finally:
1160 del wlock
1158 del wlock
1161
1159
def undelete(self, list):
    """Restore files that are marked removed ('r') from a parent manifest.

    Each restored file is rewritten into the working directory with its
    recorded flags and returned to the 'normal' dirstate state.
    """
    wlock = None
    try:
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        for fn in list:
            if self.dirstate[fn] != 'r':
                self.ui.warn(_("%s not removed!\n") % fn)
                continue
            # prefer the first parent's manifest when it knows the file
            m = fn in manifests[0] and manifests[0] or manifests[1]
            data = self.file(fn).read(m[fn])
            self.wwrite(fn, data, m.flags(fn))
            self.dirstate.normal(fn)
    finally:
        del wlock
1178
1176
def copy(self, source, dest):
    """Record in the dirstate that dest is a copy of source.

    dest must already exist in the working directory as a regular file
    or symlink; otherwise only a warning is emitted.
    """
    wlock = None
    try:
        target = self.wjoin(dest)
        if not (os.path.exists(target) or os.path.islink(target)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(target) or os.path.islink(target)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            # untracked or removed targets must be (re-)added first
            if self.dirstate[dest] in '?r':
                self.dirstate.add(dest)
            self.dirstate.copy(source, dest)
    finally:
        del wlock
1195
1193
def heads(self, start=None, closed=True):
    """Return repository head nodes, highest revision first.

    With closed=False, heads whose changeset extras carry a 'close'
    marker are filtered out.
    """
    candidates = self.changelog.heads(start)

    def _visible(head):
        if closed:
            return True
        extras = self.changelog.read(head)[5]
        return ('close' not in extras)

    # negate the rev so an ascending sort yields rev-descending output
    decorated = [(-self.changelog.rev(h), h)
                 for h in candidates if _visible(h)]
    return [node for (negrev, node) in util.sort(decorated)]
1206
1204
def branchheads(self, branch=None, start=None, closed=True):
    """Return the heads of the given named branch (default: the
    working directory's branch), highest first.

    start, when given, restricts the result to heads reachable from
    that node; closed=False drops heads marked with a 'close' extra.
    """
    if branch is None:
        branch = self[None].branch()
    branchmap = self._branchheads()
    if branch not in branchmap:
        return []
    bheads = branchmap[branch]
    # the cache returns heads ordered lowest to highest
    bheads.reverse()
    if start is not None:
        # filter out the heads that cannot be reached from startrev
        bheads = self.changelog.nodesbetween([start], bheads)[2]
    if not closed:
        bheads = [h for h in bheads
                  if ('close' not in self.changelog.read(h)[5])]
    return bheads
1223
1221
def branches(self, nodes):
    """For each node, walk first parents down to the root of its linear
    segment and return (head, root, parent1, parent2) tuples.

    The walk stops at a merge (second parent set) or at the repository
    root.  Defaults to the tip when nodes is empty.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for head in nodes:
        node = head
        while True:
            parents = self.changelog.parents(node)
            # a merge or the null parent ends the linear segment
            if parents[1] != nullid or parents[0] == nullid:
                result.append((head, node, parents[0], parents[1]))
                break
            node = parents[0]
    return result
1237
1235
def between(self, pairs):
    """For each (top, bottom) pair, walk first parents from top toward
    bottom and collect the nodes at exponentially spaced distances
    (1, 2, 4, ...).  Returns one list of sampled nodes per pair.
    """
    results = []
    for top, bottom in pairs:
        sampled = []
        node, dist, nextpick = top, 0, 1
        while node != bottom and node != nullid:
            if dist == nextpick:
                sampled.append(node)
                nextpick *= 2       # double the gap to the next sample
            node = self.changelog.parents(node)[0]
            dist += 1
        results.append(sampled)
    return results
1256
1254
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote.

    Thin wrapper around findcommonincoming: the arguments have the
    same meaning (base is updated in place with the common frontier;
    heads restricts the search), and only the "missing roots" element
    of its (common, missing roots, heads) result is returned.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self
    (and so we know that the rest of the nodes are missing in remote,
    see outgoing).
    """
    return self.findcommonincoming(remote, base, heads, force)[1]
1274
1272
def findcommonincoming(self, remote, base=None, heads=None, force=False):
    """Return a tuple (common, missing roots, heads) used to identify
    missing nodes from remote.

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    """
    m = self.changelog.nodemap
    search = []
    fetch = {}
    seen = {}
    seenbranch = {}
    # fixed: identity comparison with None ('== None' needlessly goes
    # through rich comparison)
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # empty local repo: everything the remote has is missing here
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid], [nullid], list(heads)
        return [nullid], [], []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    # split remote heads into locally-known (-> common base) and unknown
    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    heads = unknown
    if not unknown:
        return base.keys(), [], []

    req = dict.fromkeys(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n[0:2]) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                # queue any still-unknown parents for the next request
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            # batch parent queries ten at a time to bound request size
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        newsearch = []
        reqcnt += 1
        for n, l in zip(search, remote.between(search)):
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of at most one node: boundary found
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        newsearch.append((p, i))
                    break
                p, f = i, f * 2
        search = newsearch

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            raise error.RepoError(_("already have changeset ")
                                  + short(f[:4]))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return base.keys(), fetch.keys(), heads
1416
1414
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    # fixed: identity comparison with None instead of '== None'
    if base is None:
        base = {}
    # populate base with the common frontier (updated in place)
    self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1464
1462
def pull(self, remote, heads=None, force=False):
    """Fetch missing changesets from remote and add them locally.

    Returns the result of addchangegroup, or 0 when there is nothing
    to pull.
    """
    lock = self.lock()
    try:
        common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                        force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None and remote.capable('changegroupsubset'):
            # no explicit head restriction: pull up to the remote heads
            heads = rheads

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        elif not remote.capable('changegroupsubset'):
            raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        del lock
1489
1487
def push(self, remote, force=False, revs=None):
    """Push local changesets to remote, choosing the transport by
    capability.

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).
    """
    if remote.capable('unbundle'):
        return self.push_unbundle(remote, force, revs)
    return self.push_addchangegroup(remote, force, revs)
1502
1500
def prepush(self, remote, force, revs):
    """Compute what a push would send to remote.

    Returns (changegroup, remote heads) on success, or (None, code)
    when there is nothing to push (code 1) or the push would create
    new remote heads without force (code 0).
    """
    common = {}
    remote_heads = remote.heads()
    inc = self.findincoming(remote, common, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, common, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1

    if not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head
        warn = 0

        if remote_heads == [nullid]:
            # pushing into an empty repo never creates extra heads
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            warn = 1
        else:
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r, heads)
                    l = [h for h in heads if h in desc]
                    if not l:
                        # known remote head with no outgoing descendant:
                        # it stays a head after the push
                        newheads.append(r)
                else:
                    # remote head unknown locally: it remains a head
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote heads!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        # use the fast path, no race possible on push
        cg = self._changegroup(common.keys(), 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1559
1557
def push_addchangegroup(self, remote, force, revs):
    """Push by applying the changegroup directly under the remote lock
    (local filesystem / old ssh servers)."""
    lock = remote.lock()
    try:
        cg, remote_heads = self.prepush(remote, force, revs)
        if cg is None:
            # nothing to push: prepush already reported; second element
            # is the status code
            return remote_heads
        return remote.addchangegroup(cg, 'push', self.url())
    finally:
        del lock
1570
1568
def push_unbundle(self, remote, force, revs):
    """Push via the unbundle protocol (remote cannot be locked by us).

    # local repo finds heads on server, finds out what revs it
    # must push. once revs transferred, if server finds it has
    # different heads (someone else won commit/push race), server
    # aborts.
    """
    cg, remote_heads = self.prepush(remote, force, revs)
    if cg is None:
        # nothing to push; second element is the status code
        return remote_heads
    if force:
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1583
1581
def changegroupinfo(self, nodes, source):
    """Report how many changesets a changegroup contains; in debug
    mode also list each node."""
    if self.ui.verbose or source == 'bundle':
        self.ui.status(_("%d changesets found\n") % len(nodes))
    if self.ui.debugflag:
        self.ui.debug(_("list of changesets:\n"))
        for node in nodes:
            self.ui.debug("%s\n" % hex(node))
1591
1589
1592 def changegroupsubset(self, bases, heads, source, extranodes=None):
1590 def changegroupsubset(self, bases, heads, source, extranodes=None):
1593 """This function generates a changegroup consisting of all the nodes
1591 """This function generates a changegroup consisting of all the nodes
1594 that are descendents of any of the bases, and ancestors of any of
1592 that are descendents of any of the bases, and ancestors of any of
1595 the heads.
1593 the heads.
1596
1594
1597 It is fairly complex as determining which filenodes and which
1595 It is fairly complex as determining which filenodes and which
1598 manifest nodes need to be included for the changeset to be complete
1596 manifest nodes need to be included for the changeset to be complete
1599 is non-trivial.
1597 is non-trivial.
1600
1598
1601 Another wrinkle is doing the reverse, figuring out which changeset in
1599 Another wrinkle is doing the reverse, figuring out which changeset in
1602 the changegroup a particular filenode or manifestnode belongs to.
1600 the changegroup a particular filenode or manifestnode belongs to.
1603
1601
1604 The caller can specify some nodes that must be included in the
1602 The caller can specify some nodes that must be included in the
1605 changegroup using the extranodes argument. It should be a dict
1603 changegroup using the extranodes argument. It should be a dict
1606 where the keys are the filenames (or 1 for the manifest), and the
1604 where the keys are the filenames (or 1 for the manifest), and the
1607 values are lists of (node, linknode) tuples, where node is a wanted
1605 values are lists of (node, linknode) tuples, where node is a wanted
1608 node and linknode is the changelog node that should be transmitted as
1606 node and linknode is the changelog node that should be transmitted as
1609 the linkrev.
1607 the linkrev.
1610 """
1608 """
1611
1609
1612 if extranodes is None:
1610 if extranodes is None:
1613 # can we go through the fast path ?
1611 # can we go through the fast path ?
1614 heads.sort()
1612 heads.sort()
1615 allheads = self.heads()
1613 allheads = self.heads()
1616 allheads.sort()
1614 allheads.sort()
1617 if heads == allheads:
1615 if heads == allheads:
1618 common = []
1616 common = []
1619 # parents of bases are known from both sides
1617 # parents of bases are known from both sides
1620 for n in bases:
1618 for n in bases:
1621 for p in self.changelog.parents(n):
1619 for p in self.changelog.parents(n):
1622 if p != nullid:
1620 if p != nullid:
1623 common.append(p)
1621 common.append(p)
1624 return self._changegroup(common, source)
1622 return self._changegroup(common, source)
1625
1623
1626 self.hook('preoutgoing', throw=True, source=source)
1624 self.hook('preoutgoing', throw=True, source=source)
1627
1625
1628 # Set up some initial variables
1626 # Set up some initial variables
1629 # Make it easy to refer to self.changelog
1627 # Make it easy to refer to self.changelog
1630 cl = self.changelog
1628 cl = self.changelog
1631 # msng is short for missing - compute the list of changesets in this
1629 # msng is short for missing - compute the list of changesets in this
1632 # changegroup.
1630 # changegroup.
1633 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1631 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1634 self.changegroupinfo(msng_cl_lst, source)
1632 self.changegroupinfo(msng_cl_lst, source)
1635 # Some bases may turn out to be superfluous, and some heads may be
1633 # Some bases may turn out to be superfluous, and some heads may be
1636 # too. nodesbetween will return the minimal set of bases and heads
1634 # too. nodesbetween will return the minimal set of bases and heads
1637 # necessary to re-create the changegroup.
1635 # necessary to re-create the changegroup.
1638
1636
1639 # Known heads are the list of heads that it is assumed the recipient
1637 # Known heads are the list of heads that it is assumed the recipient
1640 # of this changegroup will know about.
1638 # of this changegroup will know about.
1641 knownheads = {}
1639 knownheads = {}
1642 # We assume that all parents of bases are known heads.
1640 # We assume that all parents of bases are known heads.
1643 for n in bases:
1641 for n in bases:
1644 for p in cl.parents(n):
1642 for p in cl.parents(n):
1645 if p != nullid:
1643 if p != nullid:
1646 knownheads[p] = 1
1644 knownheads[p] = 1
1647 knownheads = knownheads.keys()
1645 knownheads = knownheads.keys()
1648 if knownheads:
1646 if knownheads:
1649 # Now that we know what heads are known, we can compute which
1647 # Now that we know what heads are known, we can compute which
1650 # changesets are known. The recipient must know about all
1648 # changesets are known. The recipient must know about all
1651 # changesets required to reach the known heads from the null
1649 # changesets required to reach the known heads from the null
1652 # changeset.
1650 # changeset.
1653 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1651 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1654 junk = None
1652 junk = None
1655 # Transform the list into an ersatz set.
1653 # Transform the list into an ersatz set.
1656 has_cl_set = dict.fromkeys(has_cl_set)
1654 has_cl_set = dict.fromkeys(has_cl_set)
1657 else:
1655 else:
1658 # If there were no known heads, the recipient cannot be assumed to
1656 # If there were no known heads, the recipient cannot be assumed to
1659 # know about any changesets.
1657 # know about any changesets.
1660 has_cl_set = {}
1658 has_cl_set = {}
1661
1659
1662 # Make it easy to refer to self.manifest
1660 # Make it easy to refer to self.manifest
1663 mnfst = self.manifest
1661 mnfst = self.manifest
1664 # We don't know which manifests are missing yet
1662 # We don't know which manifests are missing yet
1665 msng_mnfst_set = {}
1663 msng_mnfst_set = {}
1666 # Nor do we know which filenodes are missing.
1664 # Nor do we know which filenodes are missing.
1667 msng_filenode_set = {}
1665 msng_filenode_set = {}
1668
1666
1669 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1667 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1670 junk = None
1668 junk = None
1671
1669
1672 # A changeset always belongs to itself, so the changenode lookup
1670 # A changeset always belongs to itself, so the changenode lookup
1673 # function for a changenode is identity.
1671 # function for a changenode is identity.
1674 def identity(x):
1672 def identity(x):
1675 return x
1673 return x
1676
1674
1677 # A function generating function. Sets up an environment for the
1675 # A function generating function. Sets up an environment for the
1678 # inner function.
1676 # inner function.
1679 def cmp_by_rev_func(revlog):
1677 def cmp_by_rev_func(revlog):
1680 # Compare two nodes by their revision number in the environment's
1678 # Compare two nodes by their revision number in the environment's
1681 # revision history. Since the revision number both represents the
1679 # revision history. Since the revision number both represents the
1682 # most efficient order to read the nodes in, and represents a
1680 # most efficient order to read the nodes in, and represents a
1683 # topological sorting of the nodes, this function is often useful.
1681 # topological sorting of the nodes, this function is often useful.
1684 def cmp_by_rev(a, b):
1682 def cmp_by_rev(a, b):
1685 return cmp(revlog.rev(a), revlog.rev(b))
1683 return cmp(revlog.rev(a), revlog.rev(b))
1686 return cmp_by_rev
1684 return cmp_by_rev
1687
1685
1688 # If we determine that a particular file or manifest node must be a
1686 # If we determine that a particular file or manifest node must be a
1689 # node that the recipient of the changegroup will already have, we can
1687 # node that the recipient of the changegroup will already have, we can
1690 # also assume the recipient will have all the parents. This function
1688 # also assume the recipient will have all the parents. This function
1691 # prunes them from the set of missing nodes.
1689 # prunes them from the set of missing nodes.
1692 def prune_parents(revlog, hasset, msngset):
1690 def prune_parents(revlog, hasset, msngset):
1693 haslst = hasset.keys()
1691 haslst = hasset.keys()
1694 haslst.sort(cmp_by_rev_func(revlog))
1692 haslst.sort(cmp_by_rev_func(revlog))
1695 for node in haslst:
1693 for node in haslst:
1696 parentlst = [p for p in revlog.parents(node) if p != nullid]
1694 parentlst = [p for p in revlog.parents(node) if p != nullid]
1697 while parentlst:
1695 while parentlst:
1698 n = parentlst.pop()
1696 n = parentlst.pop()
1699 if n not in hasset:
1697 if n not in hasset:
1700 hasset[n] = 1
1698 hasset[n] = 1
1701 p = [p for p in revlog.parents(n) if p != nullid]
1699 p = [p for p in revlog.parents(n) if p != nullid]
1702 parentlst.extend(p)
1700 parentlst.extend(p)
1703 for n in hasset:
1701 for n in hasset:
1704 msngset.pop(n, None)
1702 msngset.pop(n, None)
1705
1703
1706 # This is a function generating function used to set up an environment
1704 # This is a function generating function used to set up an environment
1707 # for the inner function to execute in.
1705 # for the inner function to execute in.
1708 def manifest_and_file_collector(changedfileset):
1706 def manifest_and_file_collector(changedfileset):
1709 # This is an information gathering function that gathers
1707 # This is an information gathering function that gathers
1710 # information from each changeset node that goes out as part of
1708 # information from each changeset node that goes out as part of
1711 # the changegroup. The information gathered is a list of which
1709 # the changegroup. The information gathered is a list of which
1712 # manifest nodes are potentially required (the recipient may
1710 # manifest nodes are potentially required (the recipient may
1713 # already have them) and total list of all files which were
1711 # already have them) and total list of all files which were
1714 # changed in any changeset in the changegroup.
1712 # changed in any changeset in the changegroup.
1715 #
1713 #
1716 # We also remember the first changenode we saw any manifest
1714 # We also remember the first changenode we saw any manifest
1717 # referenced by so we can later determine which changenode 'owns'
1715 # referenced by so we can later determine which changenode 'owns'
1718 # the manifest.
1716 # the manifest.
1719 def collect_manifests_and_files(clnode):
1717 def collect_manifests_and_files(clnode):
1720 c = cl.read(clnode)
1718 c = cl.read(clnode)
1721 for f in c[3]:
1719 for f in c[3]:
1722 # This is to make sure we only have one instance of each
1720 # This is to make sure we only have one instance of each
1723 # filename string for each filename.
1721 # filename string for each filename.
1724 changedfileset.setdefault(f, f)
1722 changedfileset.setdefault(f, f)
1725 msng_mnfst_set.setdefault(c[0], clnode)
1723 msng_mnfst_set.setdefault(c[0], clnode)
1726 return collect_manifests_and_files
1724 return collect_manifests_and_files
1727
1725
1728 # Figure out which manifest nodes (of the ones we think might be part
1726 # Figure out which manifest nodes (of the ones we think might be part
1729 # of the changegroup) the recipient must know about and remove them
1727 # of the changegroup) the recipient must know about and remove them
1730 # from the changegroup.
1728 # from the changegroup.
1731 def prune_manifests():
1729 def prune_manifests():
1732 has_mnfst_set = {}
1730 has_mnfst_set = {}
1733 for n in msng_mnfst_set:
1731 for n in msng_mnfst_set:
1734 # If a 'missing' manifest thinks it belongs to a changenode
1732 # If a 'missing' manifest thinks it belongs to a changenode
1735 # the recipient is assumed to have, obviously the recipient
1733 # the recipient is assumed to have, obviously the recipient
1736 # must have that manifest.
1734 # must have that manifest.
1737 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1735 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1738 if linknode in has_cl_set:
1736 if linknode in has_cl_set:
1739 has_mnfst_set[n] = 1
1737 has_mnfst_set[n] = 1
1740 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1738 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1741
1739
1742 # Use the information collected in collect_manifests_and_files to say
1740 # Use the information collected in collect_manifests_and_files to say
1743 # which changenode any manifestnode belongs to.
1741 # which changenode any manifestnode belongs to.
1744 def lookup_manifest_link(mnfstnode):
1742 def lookup_manifest_link(mnfstnode):
1745 return msng_mnfst_set[mnfstnode]
1743 return msng_mnfst_set[mnfstnode]
1746
1744
1747 # A function generating function that sets up the initial environment
1745 # A function generating function that sets up the initial environment
1748 # the inner function.
1746 # the inner function.
1749 def filenode_collector(changedfiles):
1747 def filenode_collector(changedfiles):
1750 next_rev = [0]
1748 next_rev = [0]
1751 # This gathers information from each manifestnode included in the
1749 # This gathers information from each manifestnode included in the
1752 # changegroup about which filenodes the manifest node references
1750 # changegroup about which filenodes the manifest node references
1753 # so we can include those in the changegroup too.
1751 # so we can include those in the changegroup too.
1754 #
1752 #
1755 # It also remembers which changenode each filenode belongs to. It
1753 # It also remembers which changenode each filenode belongs to. It
1756 # does this by assuming the a filenode belongs to the changenode
1754 # does this by assuming the a filenode belongs to the changenode
1757 # the first manifest that references it belongs to.
1755 # the first manifest that references it belongs to.
1758 def collect_msng_filenodes(mnfstnode):
1756 def collect_msng_filenodes(mnfstnode):
1759 r = mnfst.rev(mnfstnode)
1757 r = mnfst.rev(mnfstnode)
1760 if r == next_rev[0]:
1758 if r == next_rev[0]:
1761 # If the last rev we looked at was the one just previous,
1759 # If the last rev we looked at was the one just previous,
1762 # we only need to see a diff.
1760 # we only need to see a diff.
1763 deltamf = mnfst.readdelta(mnfstnode)
1761 deltamf = mnfst.readdelta(mnfstnode)
1764 # For each line in the delta
1762 # For each line in the delta
1765 for f, fnode in deltamf.iteritems():
1763 for f, fnode in deltamf.iteritems():
1766 f = changedfiles.get(f, None)
1764 f = changedfiles.get(f, None)
1767 # And if the file is in the list of files we care
1765 # And if the file is in the list of files we care
1768 # about.
1766 # about.
1769 if f is not None:
1767 if f is not None:
1770 # Get the changenode this manifest belongs to
1768 # Get the changenode this manifest belongs to
1771 clnode = msng_mnfst_set[mnfstnode]
1769 clnode = msng_mnfst_set[mnfstnode]
1772 # Create the set of filenodes for the file if
1770 # Create the set of filenodes for the file if
1773 # there isn't one already.
1771 # there isn't one already.
1774 ndset = msng_filenode_set.setdefault(f, {})
1772 ndset = msng_filenode_set.setdefault(f, {})
1775 # And set the filenode's changelog node to the
1773 # And set the filenode's changelog node to the
1776 # manifest's if it hasn't been set already.
1774 # manifest's if it hasn't been set already.
1777 ndset.setdefault(fnode, clnode)
1775 ndset.setdefault(fnode, clnode)
1778 else:
1776 else:
1779 # Otherwise we need a full manifest.
1777 # Otherwise we need a full manifest.
1780 m = mnfst.read(mnfstnode)
1778 m = mnfst.read(mnfstnode)
1781 # For every file in we care about.
1779 # For every file in we care about.
1782 for f in changedfiles:
1780 for f in changedfiles:
1783 fnode = m.get(f, None)
1781 fnode = m.get(f, None)
1784 # If it's in the manifest
1782 # If it's in the manifest
1785 if fnode is not None:
1783 if fnode is not None:
1786 # See comments above.
1784 # See comments above.
1787 clnode = msng_mnfst_set[mnfstnode]
1785 clnode = msng_mnfst_set[mnfstnode]
1788 ndset = msng_filenode_set.setdefault(f, {})
1786 ndset = msng_filenode_set.setdefault(f, {})
1789 ndset.setdefault(fnode, clnode)
1787 ndset.setdefault(fnode, clnode)
1790 # Remember the revision we hope to see next.
1788 # Remember the revision we hope to see next.
1791 next_rev[0] = r + 1
1789 next_rev[0] = r + 1
1792 return collect_msng_filenodes
1790 return collect_msng_filenodes
1793
1791
1794 # We have a list of filenodes we think we need for a file, lets remove
1792 # We have a list of filenodes we think we need for a file, lets remove
1795 # all those we now the recipient must have.
1793 # all those we now the recipient must have.
1796 def prune_filenodes(f, filerevlog):
1794 def prune_filenodes(f, filerevlog):
1797 msngset = msng_filenode_set[f]
1795 msngset = msng_filenode_set[f]
1798 hasset = {}
1796 hasset = {}
1799 # If a 'missing' filenode thinks it belongs to a changenode we
1797 # If a 'missing' filenode thinks it belongs to a changenode we
1800 # assume the recipient must have, then the recipient must have
1798 # assume the recipient must have, then the recipient must have
1801 # that filenode.
1799 # that filenode.
1802 for n in msngset:
1800 for n in msngset:
1803 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1801 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1804 if clnode in has_cl_set:
1802 if clnode in has_cl_set:
1805 hasset[n] = 1
1803 hasset[n] = 1
1806 prune_parents(filerevlog, hasset, msngset)
1804 prune_parents(filerevlog, hasset, msngset)
1807
1805
1808 # A function generator function that sets up the a context for the
1806 # A function generator function that sets up the a context for the
1809 # inner function.
1807 # inner function.
1810 def lookup_filenode_link_func(fname):
1808 def lookup_filenode_link_func(fname):
1811 msngset = msng_filenode_set[fname]
1809 msngset = msng_filenode_set[fname]
1812 # Lookup the changenode the filenode belongs to.
1810 # Lookup the changenode the filenode belongs to.
1813 def lookup_filenode_link(fnode):
1811 def lookup_filenode_link(fnode):
1814 return msngset[fnode]
1812 return msngset[fnode]
1815 return lookup_filenode_link
1813 return lookup_filenode_link
1816
1814
1817 # Add the nodes that were explicitly requested.
1815 # Add the nodes that were explicitly requested.
1818 def add_extra_nodes(name, nodes):
1816 def add_extra_nodes(name, nodes):
1819 if not extranodes or name not in extranodes:
1817 if not extranodes or name not in extranodes:
1820 return
1818 return
1821
1819
1822 for node, linknode in extranodes[name]:
1820 for node, linknode in extranodes[name]:
1823 if node not in nodes:
1821 if node not in nodes:
1824 nodes[node] = linknode
1822 nodes[node] = linknode
1825
1823
1826 # Now that we have all theses utility functions to help out and
1824 # Now that we have all theses utility functions to help out and
1827 # logically divide up the task, generate the group.
1825 # logically divide up the task, generate the group.
1828 def gengroup():
1826 def gengroup():
1829 # The set of changed files starts empty.
1827 # The set of changed files starts empty.
1830 changedfiles = {}
1828 changedfiles = {}
1831 # Create a changenode group generator that will call our functions
1829 # Create a changenode group generator that will call our functions
1832 # back to lookup the owning changenode and collect information.
1830 # back to lookup the owning changenode and collect information.
1833 group = cl.group(msng_cl_lst, identity,
1831 group = cl.group(msng_cl_lst, identity,
1834 manifest_and_file_collector(changedfiles))
1832 manifest_and_file_collector(changedfiles))
1835 for chnk in group:
1833 for chnk in group:
1836 yield chnk
1834 yield chnk
1837
1835
1838 # The list of manifests has been collected by the generator
1836 # The list of manifests has been collected by the generator
1839 # calling our functions back.
1837 # calling our functions back.
1840 prune_manifests()
1838 prune_manifests()
1841 add_extra_nodes(1, msng_mnfst_set)
1839 add_extra_nodes(1, msng_mnfst_set)
1842 msng_mnfst_lst = msng_mnfst_set.keys()
1840 msng_mnfst_lst = msng_mnfst_set.keys()
1843 # Sort the manifestnodes by revision number.
1841 # Sort the manifestnodes by revision number.
1844 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1842 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1845 # Create a generator for the manifestnodes that calls our lookup
1843 # Create a generator for the manifestnodes that calls our lookup
1846 # and data collection functions back.
1844 # and data collection functions back.
1847 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1845 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1848 filenode_collector(changedfiles))
1846 filenode_collector(changedfiles))
1849 for chnk in group:
1847 for chnk in group:
1850 yield chnk
1848 yield chnk
1851
1849
1852 # These are no longer needed, dereference and toss the memory for
1850 # These are no longer needed, dereference and toss the memory for
1853 # them.
1851 # them.
1854 msng_mnfst_lst = None
1852 msng_mnfst_lst = None
1855 msng_mnfst_set.clear()
1853 msng_mnfst_set.clear()
1856
1854
1857 if extranodes:
1855 if extranodes:
1858 for fname in extranodes:
1856 for fname in extranodes:
1859 if isinstance(fname, int):
1857 if isinstance(fname, int):
1860 continue
1858 continue
1861 msng_filenode_set.setdefault(fname, {})
1859 msng_filenode_set.setdefault(fname, {})
1862 changedfiles[fname] = 1
1860 changedfiles[fname] = 1
1863 # Go through all our files in order sorted by name.
1861 # Go through all our files in order sorted by name.
1864 for fname in util.sort(changedfiles):
1862 for fname in util.sort(changedfiles):
1865 filerevlog = self.file(fname)
1863 filerevlog = self.file(fname)
1866 if not len(filerevlog):
1864 if not len(filerevlog):
1867 raise util.Abort(_("empty or missing revlog for %s") % fname)
1865 raise util.Abort(_("empty or missing revlog for %s") % fname)
1868 # Toss out the filenodes that the recipient isn't really
1866 # Toss out the filenodes that the recipient isn't really
1869 # missing.
1867 # missing.
1870 if fname in msng_filenode_set:
1868 if fname in msng_filenode_set:
1871 prune_filenodes(fname, filerevlog)
1869 prune_filenodes(fname, filerevlog)
1872 add_extra_nodes(fname, msng_filenode_set[fname])
1870 add_extra_nodes(fname, msng_filenode_set[fname])
1873 msng_filenode_lst = msng_filenode_set[fname].keys()
1871 msng_filenode_lst = msng_filenode_set[fname].keys()
1874 else:
1872 else:
1875 msng_filenode_lst = []
1873 msng_filenode_lst = []
1876 # If any filenodes are left, generate the group for them,
1874 # If any filenodes are left, generate the group for them,
1877 # otherwise don't bother.
1875 # otherwise don't bother.
1878 if len(msng_filenode_lst) > 0:
1876 if len(msng_filenode_lst) > 0:
1879 yield changegroup.chunkheader(len(fname))
1877 yield changegroup.chunkheader(len(fname))
1880 yield fname
1878 yield fname
1881 # Sort the filenodes by their revision #
1879 # Sort the filenodes by their revision #
1882 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1880 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1883 # Create a group generator and only pass in a changenode
1881 # Create a group generator and only pass in a changenode
1884 # lookup function as we need to collect no information
1882 # lookup function as we need to collect no information
1885 # from filenodes.
1883 # from filenodes.
1886 group = filerevlog.group(msng_filenode_lst,
1884 group = filerevlog.group(msng_filenode_lst,
1887 lookup_filenode_link_func(fname))
1885 lookup_filenode_link_func(fname))
1888 for chnk in group:
1886 for chnk in group:
1889 yield chnk
1887 yield chnk
1890 if fname in msng_filenode_set:
1888 if fname in msng_filenode_set:
1891 # Don't need this anymore, toss it to free memory.
1889 # Don't need this anymore, toss it to free memory.
1892 del msng_filenode_set[fname]
1890 del msng_filenode_set[fname]
1893 # Signal that no more groups are left.
1891 # Signal that no more groups are left.
1894 yield changegroup.closechunk()
1892 yield changegroup.closechunk()
1895
1893
1896 if msng_cl_lst:
1894 if msng_cl_lst:
1897 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1895 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1898
1896
1899 return util.chunkbuffer(gengroup())
1897 return util.chunkbuffer(gengroup())
1900
1898
1901 def changegroup(self, basenodes, source):
1899 def changegroup(self, basenodes, source):
1902 # to avoid a race we use changegroupsubset() (issue1320)
1900 # to avoid a race we use changegroupsubset() (issue1320)
1903 return self.changegroupsubset(basenodes, self.heads(), source)
1901 return self.changegroupsubset(basenodes, self.heads(), source)
1904
1902
1905 def _changegroup(self, common, source):
1903 def _changegroup(self, common, source):
1906 """Generate a changegroup of all nodes that we have that a recipient
1904 """Generate a changegroup of all nodes that we have that a recipient
1907 doesn't.
1905 doesn't.
1908
1906
1909 This is much easier than the previous function as we can assume that
1907 This is much easier than the previous function as we can assume that
1910 the recipient has any changenode we aren't sending them.
1908 the recipient has any changenode we aren't sending them.
1911
1909
1912 common is the set of common nodes between remote and self"""
1910 common is the set of common nodes between remote and self"""
1913
1911
1914 self.hook('preoutgoing', throw=True, source=source)
1912 self.hook('preoutgoing', throw=True, source=source)
1915
1913
1916 cl = self.changelog
1914 cl = self.changelog
1917 nodes = cl.findmissing(common)
1915 nodes = cl.findmissing(common)
1918 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1916 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1919 self.changegroupinfo(nodes, source)
1917 self.changegroupinfo(nodes, source)
1920
1918
1921 def identity(x):
1919 def identity(x):
1922 return x
1920 return x
1923
1921
1924 def gennodelst(log):
1922 def gennodelst(log):
1925 for r in log:
1923 for r in log:
1926 if log.linkrev(r) in revset:
1924 if log.linkrev(r) in revset:
1927 yield log.node(r)
1925 yield log.node(r)
1928
1926
1929 def changed_file_collector(changedfileset):
1927 def changed_file_collector(changedfileset):
1930 def collect_changed_files(clnode):
1928 def collect_changed_files(clnode):
1931 c = cl.read(clnode)
1929 c = cl.read(clnode)
1932 for fname in c[3]:
1930 for fname in c[3]:
1933 changedfileset[fname] = 1
1931 changedfileset[fname] = 1
1934 return collect_changed_files
1932 return collect_changed_files
1935
1933
1936 def lookuprevlink_func(revlog):
1934 def lookuprevlink_func(revlog):
1937 def lookuprevlink(n):
1935 def lookuprevlink(n):
1938 return cl.node(revlog.linkrev(revlog.rev(n)))
1936 return cl.node(revlog.linkrev(revlog.rev(n)))
1939 return lookuprevlink
1937 return lookuprevlink
1940
1938
1941 def gengroup():
1939 def gengroup():
1942 # construct a list of all changed files
1940 # construct a list of all changed files
1943 changedfiles = {}
1941 changedfiles = {}
1944
1942
1945 for chnk in cl.group(nodes, identity,
1943 for chnk in cl.group(nodes, identity,
1946 changed_file_collector(changedfiles)):
1944 changed_file_collector(changedfiles)):
1947 yield chnk
1945 yield chnk
1948
1946
1949 mnfst = self.manifest
1947 mnfst = self.manifest
1950 nodeiter = gennodelst(mnfst)
1948 nodeiter = gennodelst(mnfst)
1951 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1949 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1952 yield chnk
1950 yield chnk
1953
1951
1954 for fname in util.sort(changedfiles):
1952 for fname in util.sort(changedfiles):
1955 filerevlog = self.file(fname)
1953 filerevlog = self.file(fname)
1956 if not len(filerevlog):
1954 if not len(filerevlog):
1957 raise util.Abort(_("empty or missing revlog for %s") % fname)
1955 raise util.Abort(_("empty or missing revlog for %s") % fname)
1958 nodeiter = gennodelst(filerevlog)
1956 nodeiter = gennodelst(filerevlog)
1959 nodeiter = list(nodeiter)
1957 nodeiter = list(nodeiter)
1960 if nodeiter:
1958 if nodeiter:
1961 yield changegroup.chunkheader(len(fname))
1959 yield changegroup.chunkheader(len(fname))
1962 yield fname
1960 yield fname
1963 lookup = lookuprevlink_func(filerevlog)
1961 lookup = lookuprevlink_func(filerevlog)
1964 for chnk in filerevlog.group(nodeiter, lookup):
1962 for chnk in filerevlog.group(nodeiter, lookup):
1965 yield chnk
1963 yield chnk
1966
1964
1967 yield changegroup.closechunk()
1965 yield changegroup.closechunk()
1968
1966
1969 if nodes:
1967 if nodes:
1970 self.hook('outgoing', node=hex(nodes[0]), source=source)
1968 self.hook('outgoing', node=hex(nodes[0]), source=source)
1971
1969
1972 return util.chunkbuffer(gengroup())
1970 return util.chunkbuffer(gengroup())
1973
1971
1974 def addchangegroup(self, source, srctype, url, emptyok=False):
1972 def addchangegroup(self, source, srctype, url, emptyok=False):
1975 """add changegroup to repo.
1973 """add changegroup to repo.
1976
1974
1977 return values:
1975 return values:
1978 - nothing changed or no source: 0
1976 - nothing changed or no source: 0
1979 - more heads than before: 1+added heads (2..n)
1977 - more heads than before: 1+added heads (2..n)
1980 - less heads than before: -1-removed heads (-2..-n)
1978 - less heads than before: -1-removed heads (-2..-n)
1981 - number of heads stays the same: 1
1979 - number of heads stays the same: 1
1982 """
1980 """
1983 def csmap(x):
1981 def csmap(x):
1984 self.ui.debug(_("add changeset %s\n") % short(x))
1982 self.ui.debug(_("add changeset %s\n") % short(x))
1985 return len(cl)
1983 return len(cl)
1986
1984
1987 def revmap(x):
1985 def revmap(x):
1988 return cl.rev(x)
1986 return cl.rev(x)
1989
1987
1990 if not source:
1988 if not source:
1991 return 0
1989 return 0
1992
1990
1993 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1991 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1994
1992
1995 changesets = files = revisions = 0
1993 changesets = files = revisions = 0
1996
1994
1997 # write changelog data to temp files so concurrent readers will not see
1995 # write changelog data to temp files so concurrent readers will not see
1998 # inconsistent view
1996 # inconsistent view
1999 cl = self.changelog
1997 cl = self.changelog
2000 cl.delayupdate()
1998 cl.delayupdate()
2001 oldheads = len(cl.heads())
1999 oldheads = len(cl.heads())
2002
2000
2003 tr = self.transaction()
2001 tr = self.transaction()
2004 try:
2002 try:
2005 trp = weakref.proxy(tr)
2003 trp = weakref.proxy(tr)
2006 # pull off the changeset group
2004 # pull off the changeset group
2007 self.ui.status(_("adding changesets\n"))
2005 self.ui.status(_("adding changesets\n"))
2008 cor = len(cl) - 1
2006 cor = len(cl) - 1
2009 chunkiter = changegroup.chunkiter(source)
2007 chunkiter = changegroup.chunkiter(source)
2010 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2008 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2011 raise util.Abort(_("received changelog group is empty"))
2009 raise util.Abort(_("received changelog group is empty"))
2012 cnr = len(cl) - 1
2010 cnr = len(cl) - 1
2013 changesets = cnr - cor
2011 changesets = cnr - cor
2014
2012
2015 # pull off the manifest group
2013 # pull off the manifest group
2016 self.ui.status(_("adding manifests\n"))
2014 self.ui.status(_("adding manifests\n"))
2017 chunkiter = changegroup.chunkiter(source)
2015 chunkiter = changegroup.chunkiter(source)
2018 # no need to check for empty manifest group here:
2016 # no need to check for empty manifest group here:
2019 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2017 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2020 # no new manifest will be created and the manifest group will
2018 # no new manifest will be created and the manifest group will
2021 # be empty during the pull
2019 # be empty during the pull
2022 self.manifest.addgroup(chunkiter, revmap, trp)
2020 self.manifest.addgroup(chunkiter, revmap, trp)
2023
2021
2024 # process the files
2022 # process the files
2025 self.ui.status(_("adding file changes\n"))
2023 self.ui.status(_("adding file changes\n"))
2026 while 1:
2024 while 1:
2027 f = changegroup.getchunk(source)
2025 f = changegroup.getchunk(source)
2028 if not f:
2026 if not f:
2029 break
2027 break
2030 self.ui.debug(_("adding %s revisions\n") % f)
2028 self.ui.debug(_("adding %s revisions\n") % f)
2031 fl = self.file(f)
2029 fl = self.file(f)
2032 o = len(fl)
2030 o = len(fl)
2033 chunkiter = changegroup.chunkiter(source)
2031 chunkiter = changegroup.chunkiter(source)
2034 if fl.addgroup(chunkiter, revmap, trp) is None:
2032 if fl.addgroup(chunkiter, revmap, trp) is None:
2035 raise util.Abort(_("received file revlog group is empty"))
2033 raise util.Abort(_("received file revlog group is empty"))
2036 revisions += len(fl) - o
2034 revisions += len(fl) - o
2037 files += 1
2035 files += 1
2038
2036
2039 # make changelog see real files again
2037 # make changelog see real files again
2040 cl.finalize(trp)
2038 cl.finalize(trp)
2041
2039
2042 newheads = len(self.changelog.heads())
2040 newheads = len(self.changelog.heads())
2043 heads = ""
2041 heads = ""
2044 if oldheads and newheads != oldheads:
2042 if oldheads and newheads != oldheads:
2045 heads = _(" (%+d heads)") % (newheads - oldheads)
2043 heads = _(" (%+d heads)") % (newheads - oldheads)
2046
2044
2047 self.ui.status(_("added %d changesets"
2045 self.ui.status(_("added %d changesets"
2048 " with %d changes to %d files%s\n")
2046 " with %d changes to %d files%s\n")
2049 % (changesets, revisions, files, heads))
2047 % (changesets, revisions, files, heads))
2050
2048
2051 if changesets > 0:
2049 if changesets > 0:
2052 self.hook('pretxnchangegroup', throw=True,
2050 self.hook('pretxnchangegroup', throw=True,
2053 node=hex(self.changelog.node(cor+1)), source=srctype,
2051 node=hex(self.changelog.node(cor+1)), source=srctype,
2054 url=url)
2052 url=url)
2055
2053
2056 tr.close()
2054 tr.close()
2057 finally:
2055 finally:
2058 del tr
2056 del tr
2059
2057
2060 if changesets > 0:
2058 if changesets > 0:
2061 # forcefully update the on-disk branch cache
2059 # forcefully update the on-disk branch cache
2062 self.ui.debug(_("updating the branch cache\n"))
2060 self.ui.debug(_("updating the branch cache\n"))
2063 self.branchtags()
2061 self.branchtags()
2064 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2062 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2065 source=srctype, url=url)
2063 source=srctype, url=url)
2066
2064
2067 for i in xrange(cor + 1, cnr + 1):
2065 for i in xrange(cor + 1, cnr + 1):
2068 self.hook("incoming", node=hex(self.changelog.node(i)),
2066 self.hook("incoming", node=hex(self.changelog.node(i)),
2069 source=srctype, url=url)
2067 source=srctype, url=url)
2070
2068
2071 # never return 0 here:
2069 # never return 0 here:
2072 if newheads < oldheads:
2070 if newheads < oldheads:
2073 return newheads - oldheads - 1
2071 return newheads - oldheads - 1
2074 else:
2072 else:
2075 return newheads - oldheads + 1
2073 return newheads - oldheads + 1
2076
2074
2077
2075
2078 def stream_in(self, remote):
2076 def stream_in(self, remote):
2079 fp = remote.stream_out()
2077 fp = remote.stream_out()
2080 l = fp.readline()
2078 l = fp.readline()
2081 try:
2079 try:
2082 resp = int(l)
2080 resp = int(l)
2083 except ValueError:
2081 except ValueError:
2084 raise error.ResponseError(
2082 raise error.ResponseError(
2085 _('Unexpected response from remote server:'), l)
2083 _('Unexpected response from remote server:'), l)
2086 if resp == 1:
2084 if resp == 1:
2087 raise util.Abort(_('operation forbidden by server'))
2085 raise util.Abort(_('operation forbidden by server'))
2088 elif resp == 2:
2086 elif resp == 2:
2089 raise util.Abort(_('locking the remote repository failed'))
2087 raise util.Abort(_('locking the remote repository failed'))
2090 elif resp != 0:
2088 elif resp != 0:
2091 raise util.Abort(_('the server sent an unknown error code'))
2089 raise util.Abort(_('the server sent an unknown error code'))
2092 self.ui.status(_('streaming all changes\n'))
2090 self.ui.status(_('streaming all changes\n'))
2093 l = fp.readline()
2091 l = fp.readline()
2094 try:
2092 try:
2095 total_files, total_bytes = map(int, l.split(' ', 1))
2093 total_files, total_bytes = map(int, l.split(' ', 1))
2096 except (ValueError, TypeError):
2094 except (ValueError, TypeError):
2097 raise error.ResponseError(
2095 raise error.ResponseError(
2098 _('Unexpected response from remote server:'), l)
2096 _('Unexpected response from remote server:'), l)
2099 self.ui.status(_('%d files to transfer, %s of data\n') %
2097 self.ui.status(_('%d files to transfer, %s of data\n') %
2100 (total_files, util.bytecount(total_bytes)))
2098 (total_files, util.bytecount(total_bytes)))
2101 start = time.time()
2099 start = time.time()
2102 for i in xrange(total_files):
2100 for i in xrange(total_files):
2103 # XXX doesn't support '\n' or '\r' in filenames
2101 # XXX doesn't support '\n' or '\r' in filenames
2104 l = fp.readline()
2102 l = fp.readline()
2105 try:
2103 try:
2106 name, size = l.split('\0', 1)
2104 name, size = l.split('\0', 1)
2107 size = int(size)
2105 size = int(size)
2108 except (ValueError, TypeError):
2106 except (ValueError, TypeError):
2109 raise error.ResponseError(
2107 raise error.ResponseError(
2110 _('Unexpected response from remote server:'), l)
2108 _('Unexpected response from remote server:'), l)
2111 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2109 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2112 ofp = self.sopener(name, 'w')
2110 ofp = self.sopener(name, 'w')
2113 for chunk in util.filechunkiter(fp, limit=size):
2111 for chunk in util.filechunkiter(fp, limit=size):
2114 ofp.write(chunk)
2112 ofp.write(chunk)
2115 ofp.close()
2113 ofp.close()
2116 elapsed = time.time() - start
2114 elapsed = time.time() - start
2117 if elapsed <= 0:
2115 if elapsed <= 0:
2118 elapsed = 0.001
2116 elapsed = 0.001
2119 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2117 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2120 (util.bytecount(total_bytes), elapsed,
2118 (util.bytecount(total_bytes), elapsed,
2121 util.bytecount(total_bytes / elapsed)))
2119 util.bytecount(total_bytes / elapsed)))
2122 self.invalidate()
2120 self.invalidate()
2123 return len(self.heads()) + 1
2121 return len(self.heads()) + 1
2124
2122
2125 def clone(self, remote, heads=[], stream=False):
2123 def clone(self, remote, heads=[], stream=False):
2126 '''clone remote repository.
2124 '''clone remote repository.
2127
2125
2128 keyword arguments:
2126 keyword arguments:
2129 heads: list of revs to clone (forces use of pull)
2127 heads: list of revs to clone (forces use of pull)
2130 stream: use streaming clone if possible'''
2128 stream: use streaming clone if possible'''
2131
2129
2132 # now, all clients that can request uncompressed clones can
2130 # now, all clients that can request uncompressed clones can
2133 # read repo formats supported by all servers that can serve
2131 # read repo formats supported by all servers that can serve
2134 # them.
2132 # them.
2135
2133
2136 # if revlog format changes, client will have to check version
2134 # if revlog format changes, client will have to check version
2137 # and format flags on "stream" capability, and use
2135 # and format flags on "stream" capability, and use
2138 # uncompressed only if compatible.
2136 # uncompressed only if compatible.
2139
2137
2140 if stream and not heads and remote.capable('stream'):
2138 if stream and not heads and remote.capable('stream'):
2141 return self.stream_in(remote)
2139 return self.stream_in(remote)
2142 return self.pull(remote, heads)
2140 return self.pull(remote, heads)
2143
2141
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback renaming every (src, dest) pair in files.

    Only plain tuples are captured in the closure, so the returned
    callable holds no reference back to the repository object and
    destructors can run promptly.
    """
    pairs = [tuple(item) for item in files]
    def renameall():
        for source, target in pairs:
            util.rename(source, target)
    return renameall
2151
2149
def instance(ui, path, create):
    """Repository factory: open (or create) the local repo at path.

    Strips a leading "file:" scheme, if present, before handing the
    bare filesystem path to localrepository.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2154
2152
def islocal(path):
    """Report whether this repository type is local (always true)."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now