salvaged: record salvaged in ChangingFiles at commit time...
marmoute
r46236:751d9436 default draft
@@ -1,458 +1,471 @@
# commit.py - function to perform commit
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
)

from . import (
    context,
    mergestate,
    metadata,
    phases,
    scmutil,
    subrepoutil,
)

def _write_copy_meta(repo):
    """return a (changelog, filelog) boolean tuple

    changelog: copy related information should be stored in the changeset
    filelog: copy related information should be written in the file revision
    """
    if repo.filecopiesmode == b'changeset-sidedata':
        writechangesetcopy = True
        writefilecopymeta = True
    else:
        writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
        writefilecopymeta = writecopiesto != b'changeset-only'
        writechangesetcopy = writecopiesto in (
            b'changeset-only',
            b'compatibility',
        )
    return writechangesetcopy, writefilecopymeta

def commitctx(repo, ctx, error=False, origctx=None):
    """Add a new revision to the target repository.
    Revision information is passed via the context argument.

    ctx.files() should list all files involved in this commit, i.e.
    modified/added/removed files. On merge, it may be wider than the
    ctx.files() to be committed, since any file nodes derived directly
    from p1 or p2 are excluded from the committed ctx.files().

    origctx is for convert to work around the problem that bug
    fixes to the files list in changesets change hashes. For
    convert to be the identity, it can pass an origctx and this
    function will use the same files list when it makes sense to
    do so.
    """
    repo = repo.unfiltered()

    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    with repo.lock(), repo.transaction(b"commit") as tr:
        mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx)

        extra = ctx.extra().copy()

        if extra is not None:
            for name in (
                b'p1copies',
                b'p2copies',
                b'filesadded',
                b'filesremoved',
            ):
                extra.pop(name, None)
        if repo.changelog._copiesstorage == b'extra':
            extra = _extra_with_copies(repo, extra, files)

        # update changelog
        repo.ui.note(_(b"committing changelog\n"))
        repo.changelog.delayupdate(tr)
        n = repo.changelog.add(
            mn,
            files,
            ctx.description(),
            tr,
            p1.node(),
            p2.node(),
            user,
            ctx.date(),
            extra,
        )
        xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
        repo.hook(
            b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
        )
        # set the new commit in its proper phase
        targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
        if targetphase:
            # retracting the boundary does not alter the parent changeset.
            # if a parent has a higher phase, the resulting phase will
            # be compliant anyway
            #
            # if minimal phase was 0 we don't need to retract anything
            phases.registernew(repo, tr, targetphase, [n])
        return n

def _prepare_files(tr, ctx, error=False, origctx=None):
    repo = ctx.repo()
    p1 = ctx.p1()

    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)

    if ctx.manifestnode():
        # reuse an existing manifest revision
        repo.ui.debug(b'reusing known manifest\n')
        mn = ctx.manifestnode()
        files = metadata.ChangingFiles()
        files.update_touched(ctx.files())
        if writechangesetcopy:
            files.update_added(ctx.filesadded())
            files.update_removed(ctx.filesremoved())
    elif not ctx.files():
        repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
        mn = p1.manifestnode()
        files = metadata.ChangingFiles()
    else:
        mn, files = _process_files(tr, ctx, error=error)

    if origctx and origctx.manifestnode() == mn:
        origfiles = origctx.files()
        assert files.touched.issubset(origfiles)
        files.update_touched(origfiles)

    if writechangesetcopy:
        files.update_copies_from_p1(ctx.p1copies())
        files.update_copies_from_p2(ctx.p2copies())

+    copy_sd = ctx.repo().filecopiesmode == b'changeset-sidedata'
+    if copy_sd and len(ctx.parents()) > 1:
+        # XXX this `mergestate.read` could be duplicated with the merge state
+        # reading in _process_files, so we could refactor further to reuse it
+        # in some cases.
+        ms = mergestate.mergestate.read(repo)
+        if ms.active():
+            for fname in sorted(ms._stateextras.keys()):
+                might_removed = ms.extras(fname).get(b'merge-removal-candidate')
+                if might_removed == b'yes':
+                    if fname in ctx:
+                        files.mark_salvaged(fname)
+
    return mn, files


def _process_files(tr, ctx, error=False):
    repo = ctx.repo()
    p1 = ctx.p1()
    p2 = ctx.p2()

    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)

    m1ctx = p1.manifestctx()
    m2ctx = p2.manifestctx()
    mctx = m1ctx.copy()

    m = mctx.read()
    m1 = m1ctx.read()
    m2 = m2ctx.read()
    ms = mergestate.mergestate.read(repo)

    files = metadata.ChangingFiles()

    # check in files
    added = []
    removed = list(ctx.removed())
    linkrev = len(repo)
    repo.ui.note(_(b"committing files:\n"))
    uipathfn = scmutil.getuipathfn(repo)
    for f in sorted(ctx.modified() + ctx.added()):
        repo.ui.note(uipathfn(f) + b"\n")
        try:
            fctx = ctx[f]
            if fctx is None:
                removed.append(f)
            else:
                added.append(f)
                m[f], is_touched = _filecommit(
                    repo, fctx, m1, m2, linkrev, tr, writefilecopymeta, ms
                )
                if is_touched:
                    if is_touched == 'added':
                        files.mark_added(f)
                    elif is_touched == 'merged':
                        files.mark_merged(f)
                    else:
                        files.mark_touched(f)
                m.setflag(f, fctx.flags())
        except OSError:
            repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
            raise
        except IOError as inst:
            errcode = getattr(inst, 'errno', errno.ENOENT)
            if error or errcode and errcode != errno.ENOENT:
                repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
                raise

    # update manifest
    removed = [f for f in removed if f in m1 or f in m2]
    drop = sorted([f for f in removed if f in m])
    for f in drop:
        del m[f]
    if p2.rev() == nullrev:
        files.update_removed(removed)
    else:
        rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
        for f in removed:
            if not rf(f):
                files.mark_removed(f)

    mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop)

    return mn, files

def _filecommit(
    repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta, ms,
):
    """
    commit an individual file as part of a larger transaction

    input:

        fctx: a file context with the content we are trying to commit
        manifest1: manifest of changeset first parent
        manifest2: manifest of changeset second parent
        linkrev: revision number of the changeset being created
        tr: current transaction
        includecopymeta: boolean, set to False to skip storing the copy data
            (only used by the Google specific feature of using
            changeset extra as copy source of truth).
        ms: mergestate object

    output: (filenode, touched)

        filenode: the filenode that should be used by this changeset
        touched: one of: None (meaning untouched), 'added', 'modified' or 'merged'
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    touched = None
    if fparent1 == fparent2 == nullid:
        touched = 'added'

    if isinstance(fctx, context.filectx):
        # This block fast-paths most of the comparisons that are usually done.
        # It assumes that a bare filectx is used and no merge happened, hence
        # no need to create a new file revision in this case.
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            repo.ui.debug(b'reusing %s filelog entry\n' % fname)
            if (
                fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
            ) or (
                fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
            ):
                touched = 'modified'
            return node, touched

    flog = repo.file(fname)
    meta = {}
    cfname = fctx.copysource()
    fnode = None

    if cfname and cfname != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                 should record that bar descends from
        #                 bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4   as the merge base
        #

        cnode = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2:  # branch merge
            if fparent2 == nullid or cnode is None:  # copied on remote side
                if cfname in manifest2:
                    cnode = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if cnode:
            repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
            if includecopymeta:
                meta[b"copy"] = cfname
                meta[b"copyrev"] = hex(cnode)
            fparent1, fparent2 = nullid, newfparent
        else:
            repo.ui.warn(
                _(
                    b"warning: can't find ancestor for '%s' "
                    b"copied from '%s'!\n"
                )
                % (fname, cfname)
            )

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid
        elif not fparentancestors:
            # TODO: this whole if-else might be simplified much more
            if (
                ms.active()
                and ms.extras(fname).get(b'filenode-source') == b'other'
            ):
                fparent1, fparent2 = fparent2, nullid

    force_new_node = False
    # The file might have been deleted by merge code and the user explicitly
    # chose to revert the file and keep it. The other case is a change-delete
    # or delete-change conflict where the user explicitly chose to keep the
    # file. The goal is to create a new filenode for the user's explicit choice.
    if (
        repo.ui.configbool(b'experimental', b'merge-track-salvaged')
        and ms.active()
        and ms.extras(fname).get(b'merge-removal-candidate') == b'yes'
    ):
        force_new_node = True
    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
        if touched is None:  # do not overwrite added
            if fparent2 == nullid:
                touched = 'modified'
            else:
                touched = 'merged'
        fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        touched = 'modified'
        fnode = fparent1
    else:
        fnode = fparent1
    return fnode, touched

def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
    """make a new manifest entry (or reuse an existing one)

    given an initialised manifest context and a precomputed list of
    - files: files affected by the commit
    - added: new entries in the manifest
    - drop: entries present in parents but absent from this one

    Create a new manifest revision, reusing existing ones if possible.

    Return the nodeid of the manifest revision.
    """
    repo = ctx.repo()

    md = None

    # all this is cached, so it is fine to get them all from the ctx.
    p1 = ctx.p1()
    p2 = ctx.p2()
    m1ctx = p1.manifestctx()

    m1 = m1ctx.read()

    if not files:
        # if no "files" actually changed in terms of the changelog,
        # try hard to detect unmodified manifest entry so that the
        # exact same commit can be reproduced later on convert.
        md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files()))
    if not files and md:
        repo.ui.debug(
            b'not reusing manifest (no file change in '
            b'changelog, but manifest differs)\n'
        )
    if files or md:
        repo.ui.note(_(b"committing manifest\n"))
        # we're using narrowmatch here since it's already applied at
        # other stages (such as dirstate.walk), so we're already
        # ignoring things outside of narrowspec in most cases. The
        # one case where we might have files outside the narrowspec
        # at this point is merges, and we already error out in the
        # case where the merge has files outside of the narrowspec,
        # so this is safe.
        mn = mctx.write(
            tr,
            linkrev,
            p1.manifestnode(),
            p2.manifestnode(),
            added,
            drop,
            match=repo.narrowmatch(),
        )
    else:
        repo.ui.debug(
            b'reusing manifest from p1 (listed files ' b'actually unchanged)\n'
        )
        mn = p1.manifestnode()

    return mn

def _extra_with_copies(repo, extra, files):
    """encode copy information into an `extra` dictionary"""
    p1copies = files.copied_from_p1
    p2copies = files.copied_from_p2
    filesadded = files.added
    filesremoved = files.removed
    files = sorted(files.touched)
    if not _write_copy_meta(repo)[1]:
        # If writing only to changeset extras, use None to indicate that
        # no entry should be written. If writing to both, write an empty
        # entry to prevent the reader from falling back to reading
        # filelogs.
        p1copies = p1copies or None
        p2copies = p2copies or None
        filesadded = filesadded or None
        filesremoved = filesremoved or None

    extrasentries = p1copies, p2copies, filesadded, filesremoved
    if extra is None and any(x is not None for x in extrasentries):
        extra = {}
    if p1copies is not None:
        p1copies = metadata.encodecopies(files, p1copies)
        extra[b'p1copies'] = p1copies
    if p2copies is not None:
        p2copies = metadata.encodecopies(files, p2copies)
        extra[b'p2copies'] = p2copies
    if filesadded is not None:
        filesadded = metadata.encodefileindices(files, filesadded)
        extra[b'filesadded'] = filesadded
    if filesremoved is not None:
        filesremoved = metadata.encodefileindices(files, filesremoved)
        extra[b'filesremoved'] = filesremoved
    return extra
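
In short, the hunk added in _prepare_files records "salvaged" files at commit time: on a merge commit with changeset-sidedata copy storage, it walks the merge-state extras and marks in ChangingFiles every file that was a removal candidate during the merge but still exists in the committed context. A minimal sketch of that logic is below; the function name record_salvaged and the arguments merge_state_extras and changed_files are hypothetical stand-ins for the real ms._stateextras mapping and the metadata.ChangingFiles instance, not part of the commit itself.

    # Illustrative sketch only (simplified, assumed names).
    def record_salvaged(ctx, merge_state_extras, changed_files):
        """Mark files the user explicitly kept despite being removal candidates."""
        for fname in sorted(merge_state_extras):
            extras = merge_state_extras[fname]
            if extras.get(b'merge-removal-candidate') == b'yes' and fname in ctx:
                # the file survived the merge even though one side wanted it gone
                changed_files.mark_salvaged(fname)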